Merge branch 'kvm-ppc-queue' of git://github.com/agraf/linux-2.6 into queue
Conflicts: arch/powerpc/include/asm/processor.h
This commit is contained in:
Commit 95f328d3ad

@@ -1810,6 +1810,50 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TLB3PS      | 32
   PPC   | KVM_REG_PPC_EPTCFG      | 32
   PPC   | KVM_REG_PPC_ICP_STATE   | 64
+  PPC   | KVM_REG_PPC_TB_OFFSET   | 64
+  PPC   | KVM_REG_PPC_SPMC1       | 32
+  PPC   | KVM_REG_PPC_SPMC2       | 32
+  PPC   | KVM_REG_PPC_IAMR        | 64
+  PPC   | KVM_REG_PPC_TFHAR       | 64
+  PPC   | KVM_REG_PPC_TFIAR       | 64
+  PPC   | KVM_REG_PPC_TEXASR      | 64
+  PPC   | KVM_REG_PPC_FSCR        | 64
+  PPC   | KVM_REG_PPC_PSPB        | 32
+  PPC   | KVM_REG_PPC_EBBHR       | 64
+  PPC   | KVM_REG_PPC_EBBRR       | 64
+  PPC   | KVM_REG_PPC_BESCR       | 64
+  PPC   | KVM_REG_PPC_TAR         | 64
+  PPC   | KVM_REG_PPC_DPDES       | 64
+  PPC   | KVM_REG_PPC_DAWR        | 64
+  PPC   | KVM_REG_PPC_DAWRX       | 64
+  PPC   | KVM_REG_PPC_CIABR       | 64
+  PPC   | KVM_REG_PPC_IC          | 64
+  PPC   | KVM_REG_PPC_VTB         | 64
+  PPC   | KVM_REG_PPC_CSIGR       | 64
+  PPC   | KVM_REG_PPC_TACR        | 64
+  PPC   | KVM_REG_PPC_TCSCR       | 64
+  PPC   | KVM_REG_PPC_PID         | 64
+  PPC   | KVM_REG_PPC_ACOP        | 64
+  PPC   | KVM_REG_PPC_VRSAVE      | 32
+  PPC   | KVM_REG_PPC_LPCR        | 64
+  PPC   | KVM_REG_PPC_PPR         | 64
+  PPC   | KVM_REG_PPC_ARCH_COMPAT | 32
+  PPC   | KVM_REG_PPC_TM_GPR0     | 64
+          ...
+  PPC   | KVM_REG_PPC_TM_GPR31    | 64
+  PPC   | KVM_REG_PPC_TM_VSR0     | 128
+          ...
+  PPC   | KVM_REG_PPC_TM_VSR63    | 128
+  PPC   | KVM_REG_PPC_TM_CR       | 64
+  PPC   | KVM_REG_PPC_TM_LR       | 64
+  PPC   | KVM_REG_PPC_TM_CTR      | 64
+  PPC   | KVM_REG_PPC_TM_FPSCR    | 64
+  PPC   | KVM_REG_PPC_TM_AMR      | 64
+  PPC   | KVM_REG_PPC_TM_PPR      | 64
+  PPC   | KVM_REG_PPC_TM_VRSAVE   | 64
+  PPC   | KVM_REG_PPC_TM_VSCR     | 32
+  PPC   | KVM_REG_PPC_TM_DSCR     | 64
+  PPC   | KVM_REG_PPC_TM_TAR      | 64
 
 ARM registers are mapped using the lower 32 bits. The upper 16 of that
 is the register group type, or coprocessor number:

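These ids are consumed through KVM's ONE_REG interface. As a minimal userspace sketch (illustrative only, not part of this commit; it assumes vcpu_fd is an open vCPU file descriptor on a kernel exposing the new id):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch KVM_REG_PPC_TB_OFFSET (a 64-bit id from the table above). */
static int read_tb_offset(int vcpu_fd, uint64_t *val)
{
    struct kvm_one_reg reg = {
        .id   = KVM_REG_PPC_TB_OFFSET,
        .addr = (uintptr_t)val,    /* the kernel copies the value here */
    };

    return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
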
@@ -152,12 +152,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }

@@ -1550,12 +1550,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }

@@ -198,12 +198,13 @@ kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 	return -ENOIOCTLCMD;
 }
 
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
 }
 
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    unsigned long npages)
 {
 	return 0;
 }

@@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst)
 	return inst & 0xffff;
 }
 
+static inline unsigned int get_oc(u32 inst)
+{
+	return (inst >> 11) & 0x7fff;
+}
 #endif /* __ASM_PPC_DISASSEMBLE_H__ */

@@ -198,12 +198,27 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	cmpwi	r10,0;							\
 	bne	do_kvm_##n
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come in to the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
 #define __KVM_HANDLER(area, h, n)					\
 do_kvm_##n:								\
 	BEGIN_FTR_SECTION_NESTED(947)					\
 	ld	r10,area+EX_CFAR(r13);					\
 	std	r10,HSTATE_CFAR(r13);					\
 	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r10,area+EX_PPR(r13);					\
+	std	r10,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r10,area+EX_R10(r13);					\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
 	ld	r9,area+EX_R9(r13);					\

@@ -217,6 +232,10 @@ do_kvm_##n:								\
 	ld	r10,area+EX_R10(r13);					\
 	beq	89f;							\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r9,area+EX_PPR(r13);					\
+	std	r9,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r9,area+EX_R9(r13);					\
 	std	r12,HSTATE_SCRATCH0(r13);				\
 	li	r12,n;							\

@@ -236,7 +255,7 @@ do_kvm_##n:								\
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 #define KVMTEST_PR(n)			__KVMTEST(n)
 #define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
 #define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)

@@ -123,6 +123,8 @@
 #define BOOK3S_HFLAG_SLB			0x2
 #define BOOK3S_HFLAG_PAIRED_SINGLE		0x4
 #define BOOK3S_HFLAG_NATIVE_PS			0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE		0x10
+#define BOOK3S_HFLAG_NEW_TLBIE			0x20
 
 #define RESUME_FLAG_NV		(1<<0)	/* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

@@ -136,6 +138,8 @@
 #define KVM_GUEST_MODE_NONE	0
 #define KVM_GUEST_MODE_GUEST	1
 #define KVM_GUEST_MODE_SKIP	2
+#define KVM_GUEST_MODE_GUEST_HV	3
+#define KVM_GUEST_MODE_HOST_HV	4
 
 #define KVM_INST_FETCH_FAILED	-1
 

@@ -58,16 +58,18 @@ struct hpte_cache {
 	struct hlist_node list_pte_long;
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_node list_vpte_64k;
+#endif
 	struct rcu_head rcu_head;
 	u64 host_vpn;
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
+	int pagesize;
 };
 
 struct kvmppc_vcpu_book3s {
-	struct kvm_vcpu vcpu;
-	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct {
 		u64 esid;

@@ -99,6 +101,9 @@ struct kvmppc_vcpu_book3s {
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
 	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
 	int hpte_cache_count;
 	spinlock_t mmu_lock;
 };

@@ -107,8 +112,9 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL	0x0fffffffffc00000ULL
-#define VSID_BAT	0x0fffffffffb00000ULL
+#define VSID_REAL	0x07ffffffffc00000ULL
+#define VSID_BAT	0x07ffffffffb00000ULL
+#define VSID_64K	0x0800000000000000ULL
 #define VSID_1T		0x1000000000000000ULL
 #define VSID_REAL_DR	0x2000000000000000ULL
 #define VSID_REAL_IR	0x4000000000000000ULL

@@ -118,11 +124,12 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
-extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
 extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+			       bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);

@@ -134,6 +141,7 @@ extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
 extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);

@@ -151,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,

@@ -172,6 +181,8 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 			unsigned long *hpret);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
 			struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+			unsigned long mask);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);

@@ -184,11 +195,9 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
-	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+	return vcpu->arch.book3s;
 }
 
-extern void kvm_return_point(void);
-
 /* Also add subarch specific defines */
 
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER

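The to_book3s() change above swaps a container_of() computation for a cached pointer, which keeps working once struct kvmppc_vcpu_book3s no longer embeds the vcpu. A standalone sketch of the two idioms (names here are illustrative, not the kernel's):

#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Old scheme: the vcpu was embedded, so the outer struct could be
 * recovered with pointer arithmetic from the inner one. */
struct book3s_demo {
    struct kvm_vcpu_demo {
        struct book3s_demo *book3s;    /* new scheme: cached back-pointer */
    } vcpu;
    unsigned long hior;
};

static inline struct book3s_demo *to_book3s_via_container_of(struct kvm_vcpu_demo *v)
{
    return container_of(v, struct book3s_demo, vcpu);
}

static inline struct book3s_demo *to_book3s_via_pointer(struct kvm_vcpu_demo *v)
{
    return v->book3s;    /* works even if the two are allocated separately */
}
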
@@ -198,203 +207,6 @@ extern void kvm_return_point(void);
 #include <asm/kvm_book3s_64.h>
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return to_book3s(vcpu)->hior;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-	if (pending_now)
-		vcpu->arch.shared->int_pending = 1;
-	else if (old_pending)
-		vcpu->arch.shared->int_pending = 0;
-}
-
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		svcpu->gpr[num] = val;
-		svcpu_put(svcpu);
-		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
-	} else
-		vcpu->arch.gpr[num] = val;
-}
-
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong r = svcpu->gpr[num];
-		svcpu_put(svcpu);
-		return r;
-	} else
-		return vcpu->arch.gpr[num];
-}
-
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->cr = val;
-	svcpu_put(svcpu);
-	to_book3s(vcpu)->shadow_vcpu->cr = val;
-}
-
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->cr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->xer = val;
-	to_book3s(vcpu)->shadow_vcpu->xer = val;
-	svcpu_put(svcpu);
-}
-
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->xer;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->ctr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->ctr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->lr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->lr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->pc = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->pc;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu) - 4;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->fault_dar;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	ulong crit_raw = vcpu->arch.shared->critical;
-	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
-	bool crit;
-
-	/* Truncate crit indicators in 32 bit mode */
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
-		crit_raw &= 0xffffffff;
-		crit_r1 &= 0xffffffff;
-	}
-
-	/* Critical section when crit == r1 */
-	crit = (crit_raw == crit_r1);
-	/* ... and we're in supervisor mode */
-	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
-
-	return crit;
-}
-#else /* CONFIG_KVM_BOOK3S_PR */
 
 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
 
 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
 {
 }
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	vcpu->arch.gpr[num] = val;

@@ -489,12 +301,6 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	return false;
-}
-#endif
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3			0x113724FA

@@ -22,7 +22,7 @@
 
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
-	return to_book3s(vcpu)->shadow_vcpu;
+	return vcpu->arch.shadow_vcpu;
 }
 
 static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)

@@ -20,7 +20,7 @@
 #ifndef __ASM_KVM_BOOK3S_64_H__
 #define __ASM_KVM_BOOK3S_64_H__
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();

@@ -35,7 +35,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #define SPAPR_TCE_SHIFT		12
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 extern unsigned long kvm_rma_pages;
 #endif

@@ -278,7 +278,7 @@ static inline int is_vrma_hpte(unsigned long hpte_v)
 		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
 }
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 /*
  * Note modification of an HPTE; set the HPTE modified bit
  * if anyone is interested.

@@ -289,6 +289,6 @@ static inline void note_hpte_modification(struct kvm *kvm,
 	if (atomic_read(&kvm->arch.hpte_mod_interest))
 		rev->guest_rpte |= HPTE_GR_MODIFIED;
 }
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */

@@ -83,7 +83,7 @@ struct kvmppc_host_state {
 	u8 restore_hid5;
 	u8 napping;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	u8 hwthread_req;
 	u8 hwthread_state;
 	u8 host_ipi;

@@ -101,6 +101,7 @@ struct kvmppc_host_state {
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	u64 cfar;
+	u64 ppr;
 #endif
 };
 

@@ -108,14 +109,14 @@ struct kvmppc_book3s_shadow_vcpu {
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
-
-	u32 fault_dsisr;
-	u32 last_inst;
 	ulong ctr;
 	ulong lr;
 	ulong pc;
+
 	ulong shadow_srr1;
 	ulong fault_dar;
+	u32 fault_dsisr;
+	u32 last_inst;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 sr[16];			/* Guest SRs */

@@ -26,7 +26,12 @@
 /* LPIDs we support with this build -- runtime limit may be lower */
 #define KVMPPC_NR_LPIDS                        64
 
-#define KVMPPC_INST_EHPRIV	0x7c00021c
+#define KVMPPC_INST_EHPRIV		0x7c00021c
+#define EHPRIV_OC_SHIFT			11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG			1
+#define KVMPPC_INST_EHPRIV_DEBUG	(KVMPPC_INST_EHPRIV | \
+					 (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {

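The OC encoding above pairs with the get_oc() helper added to asm/disassemble.h earlier in this diff. A hedged, standalone round-trip check (plain C outside the kernel, constants mirrored from the patch):

#include <assert.h>
#include <stdint.h>

#define KVMPPC_INST_EHPRIV	0x7c00021c
#define EHPRIV_OC_SHIFT		11
#define EHPRIV_OC_DEBUG		1

/* Same field extraction as get_oc() in asm/disassemble.h. */
static unsigned int get_oc(uint32_t inst)
{
    return (inst >> 11) & 0x7fff;
}

int main(void)
{
    uint32_t inst = KVMPPC_INST_EHPRIV | (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT);

    assert(get_oc(inst) == EHPRIV_OC_DEBUG);    /* "ehpriv 1" decodes back to OC=1 */
    return 0;
}
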
@@ -68,10 +68,12 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 #define HPTEG_HASH_BITS_PTE_LONG	12
 #define HPTEG_HASH_BITS_VPTE		13
 #define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_BITS_VPTE_64K	11
 #define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
 #define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)
 #define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
 #define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)
 
 /* Physical Address Mask - allowed range of real mode RAM access */
 #define KVM_PAM			0x0fffffffffffffffULL

@@ -84,6 +86,9 @@ struct lppaca;
 struct slb_shadow;
 struct dtl_entry;
 
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
 };

@@ -219,15 +224,15 @@ struct revmap_entry {
 #define KVMPPC_GOT_PAGE		0x80
 
 struct kvm_arch_memory_slot {
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long *rmap;
 	unsigned long *slot_phys;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
 
 struct kvm_arch {
 	unsigned int lpid;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
 	unsigned int host_lpid;

@@ -251,7 +256,10 @@ struct kvm_arch {
 	cpumask_t need_tlb_flush;
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	int hpt_cma_alloc;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	struct mutex hpt_mutex;
+#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;

@@ -262,6 +270,7 @@ struct kvm_arch {
 #ifdef CONFIG_KVM_XICS
 	struct kvmppc_xics *xics;
 #endif
+	struct kvmppc_ops *kvm_ops;
 };
 
 /*

@@ -289,6 +298,10 @@ struct kvmppc_vcore {
 	u64 stolen_tb;
 	u64 preempt_tb;
 	struct kvm_vcpu *runner;
+	u64 tb_offset;		/* guest timebase - host timebase */
+	ulong lpcr;
+	u32 arch_compat;
+	ulong pcr;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)

@@ -323,6 +336,7 @@ struct kvmppc_pte {
 	bool may_read		: 1;
 	bool may_write		: 1;
 	bool may_execute	: 1;
+	u8 page_size;		/* MMU_PAGE_xxx */
 };
 
 struct kvmppc_mmu {

@@ -335,7 +349,8 @@ struct kvmppc_mmu {
 	/* book3s */
 	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
 	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
-	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+		      struct kvmppc_pte *pte, bool data, bool iswrite);
 	void (*reset_msr)(struct kvm_vcpu *vcpu);
 	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
 	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);

@@ -355,6 +370,7 @@ struct kvmppc_slb {
 	bool large	: 1;	/* PTEs are 16MB */
 	bool tb		: 1;	/* 1TB segment */
 	bool class	: 1;
+	u8 base_page_size;	/* MMU_PAGE_xxx */
 };
 
 # ifdef CONFIG_PPC_FSL_BOOK3E

@@ -372,17 +388,6 @@ struct kvmppc_slb {
 #define KVMPPC_EPR_USER		1 /* exit to userspace to fill EPR */
 #define KVMPPC_EPR_KERNEL	2 /* in-kernel irqchip */
 
-struct kvmppc_booke_debug_reg {
-	u32 dbcr0;
-	u32 dbcr1;
-	u32 dbcr2;
-#ifdef CONFIG_KVM_E500MC
-	u32 dbcr4;
-#endif
-	u64 iac[KVMPPC_BOOKE_MAX_IAC];
-	u64 dac[KVMPPC_BOOKE_MAX_DAC];
-};
-
 #define KVMPPC_IRQ_DEFAULT	0
 #define KVMPPC_IRQ_MPIC		1
 #define KVMPPC_IRQ_XICS		2

@@ -397,6 +402,10 @@ struct kvm_vcpu_arch {
 	int slb_max;		/* 1 + index of last valid entry in slb[] */
 	int slb_nr;		/* total number of entries in SLB */
 	struct kvmppc_mmu mmu;
+	struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 #endif
 
 	ulong gpr[32];

@@ -458,6 +467,8 @@ struct kvm_vcpu_arch {
 	u32 ctrl;
 	ulong dabr;
 	ulong cfar;
+	ulong ppr;
+	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;

@@ -493,6 +504,8 @@ struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;

@@ -526,7 +539,10 @@ struct kvm_vcpu_arch {
 	u32 eptcfg;
 	u32 epr;
 	u32 crit_save;
-	struct kvmppc_booke_debug_reg dbg_reg;
+	/* guest debug registers*/
+	struct debug_reg dbg_reg;
+	/* hardware visible debug registers when in guest state */
+	struct debug_reg shadow_dbg_reg;
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;

@@ -577,7 +593,7 @@ struct kvm_vcpu_arch {
 	struct kvmppc_icp *icp; /* XICS presentation controller */
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	struct kvm_vcpu_arch_shared shregs;
 
 	unsigned long pgfault_addr;

@@ -106,13 +106,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                        struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
-
-extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                                  unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
-                                     ulong val);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
-                                     ulong *val);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_booke_init(void);

@@ -135,17 +128,17 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
-extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
-				struct kvm_allocate_rma *rma);
 extern struct kvm_rma_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
 extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+				     struct kvm_memory_slot *free,
 				     struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+				      struct kvm_memory_slot *slot,
 				      unsigned long npages);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,

@@ -177,6 +170,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
 extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
 
+union kvmppc_one_reg {
+	u32	wval;
+	u64	dval;
+	vector128 vval;
+	u64	vsxval[2];
+	struct {
+		u64	addr;
+		u64	length;
+	}	vpaval;
+};
+
+struct kvmppc_ops {
+	struct module *owner;
+	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	void (*vcpu_free)(struct kvm_vcpu *vcpu);
+	int (*check_requests)(struct kvm_vcpu *vcpu);
+	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+	int (*prepare_memory_region)(struct kvm *kvm,
+				     struct kvm_memory_slot *memslot,
+				     struct kvm_userspace_memory_region *mem);
+	void (*commit_memory_region)(struct kvm *kvm,
+				     struct kvm_userspace_memory_region *mem,
+				     const struct kvm_memory_slot *old);
+	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
+	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+			       unsigned long end);
+	int (*age_hva)(struct kvm *kvm, unsigned long hva);
+	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+	void (*free_memslot)(struct kvm_memory_slot *free,
+			     struct kvm_memory_slot *dont);
+	int (*create_memslot)(struct kvm_memory_slot *slot,
+			      unsigned long npages);
+	int (*init_vm)(struct kvm *kvm);
+	void (*destroy_vm)(struct kvm *kvm);
+	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int inst, int *advance);
+	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+	return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.

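struct kvmppc_ops is what lets the HV and PR implementations be built as separate modules and selected per VM at runtime: generic code calls through kvm->arch.kvm_ops instead of hardwiring one backend. A toy sketch of the dispatch pattern (names here are illustrative, not the kernel's):

#include <stdio.h>

struct vm;

struct ops_demo {
    const char *name;
    int (*vcpu_run)(struct vm *vm);
};

static int hv_run(struct vm *vm) { (void)vm; return puts("HV entry"); }
static int pr_run(struct vm *vm) { (void)vm; return puts("PR entry"); }

static struct ops_demo hv_ops = { "HV", hv_run };
static struct ops_demo pr_ops = { "PR", pr_run };

struct vm { struct ops_demo *ops; };    /* mirrors kvm->arch.kvm_ops */

int main(void)
{
    struct vm vm1 = { .ops = &pr_ops };    /* backend chosen at VM-create time */
    struct vm vm2 = { .ops = &hv_ops };

    vm1.ops->vcpu_run(&vm1);    /* generic code never names HV or PR */
    vm2.ops->vcpu_run(&vm2);
    return 0;
}
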
@@ -210,17 +269,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
 	return r;
 }
 
-union kvmppc_one_reg {
-	u32	wval;
-	u64	dval;
-	vector128 vval;
-	u64	vsxval[2];
-	struct {
-		u64	addr;
-		u64	length;
-	}	vpaval;
-};
-
 #define one_reg_size(id) \
 	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 

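one_reg_size() decodes the size field packed into every ONE_REG id, where the field stores log2 of the byte width. A worked check (a sketch; the constants mirror uapi/linux/kvm.h):

#include <assert.h>

#define KVM_REG_SIZE_SHIFT	52
#define KVM_REG_SIZE_MASK	0x00f0000000000000ULL
#define KVM_REG_SIZE_U64	0x0030000000000000ULL	/* log2(8)  = 3 */
#define KVM_REG_SIZE_U128	0x0040000000000000ULL	/* log2(16) = 4 */

#define one_reg_size(id) \
    (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

int main(void)
{
    assert(one_reg_size(KVM_REG_SIZE_U64)  == 8);    /* e.g. KVM_REG_PPC_TB_OFFSET */
    assert(one_reg_size(KVM_REG_SIZE_U128) == 16);   /* e.g. KVM_REG_PPC_TM_VSR0 */
    return 0;
}
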
@@ -245,10 +293,10 @@ union kvmppc_one_reg {
 	__v;					\
 })
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);

@@ -260,7 +308,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
 
 struct openpic;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 extern void kvm_cma_reserve(void) __init;
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {

@@ -269,10 +317,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 
 static inline u32 kvmppc_get_xics_latch(void)
 {
-	u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+	u32 xirr;
 
+	xirr = get_paca()->kvm_hstate.saved_xirr;
 	get_paca()->kvm_hstate.saved_xirr = 0;
-
 	return xirr;
 }
 

@@ -281,7 +329,10 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
 	paca[cpu].kvm_hstate.host_ipi = host_ipi;
 }
 
-extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
 
 #else
 static inline void __init kvm_cma_reserve(void)

@@ -166,7 +166,7 @@ struct paca_struct {
 	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	/* We use this to store guest state in */
 	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 #endif

@@ -147,21 +147,7 @@ typedef struct {
 #define TS_FPR(i) fpr[i][TS_FPROFFSET]
 #define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
 
-struct thread_struct {
-	unsigned long	ksp;		/* Kernel stack pointer */
-#ifdef CONFIG_PPC64
-	unsigned long	ksp_vsid;
-#endif
-	struct pt_regs	*regs;		/* Pointer to saved register state */
-	mm_segment_t	fs;		/* for get_fs() validation */
-#ifdef CONFIG_BOOKE
-	/* BookE base exception scratch space; align on cacheline */
-	unsigned long	normsave[8] ____cacheline_aligned;
-#endif
-#ifdef CONFIG_PPC32
-	void		*pgdir;		/* root of page-table tree */
-	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
-#endif
+struct debug_reg {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	/*
 	 * The following help to manage the use of Debug Control Registers

@@ -198,6 +184,27 @@ struct thread_struct {
 	unsigned long	dvc2;
 #endif
 #endif
+};
+
+struct thread_struct {
+	unsigned long	ksp;		/* Kernel stack pointer */
+
+#ifdef CONFIG_PPC64
+	unsigned long	ksp_vsid;
+#endif
+	struct pt_regs	*regs;		/* Pointer to saved register state */
+	mm_segment_t	fs;		/* for get_fs() validation */
+#ifdef CONFIG_BOOKE
+	/* BookE base exception scratch space; align on cacheline */
+	unsigned long	normsave[8] ____cacheline_aligned;
+#endif
+#ifdef CONFIG_PPC32
+	void		*pgdir;		/* root of page-table tree */
+	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
+#endif
+	/* Debug Registers */
+	struct debug_reg debug;
 
 	/* FP and VSX 0-31 register set */
 	double		fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
 	struct {

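Pulling the loose dbcr*/iac*/dac* fields into struct debug_reg means the whole register family can be handed around (and, in the KVM hunks above, shadowed) as one unit instead of field by field. A minimal sketch of the pattern (illustrative names, not the kernel's):

struct debug_reg_demo {
    unsigned long dbcr0;
    unsigned long dbcr1;
    unsigned long iac1, iac2;
};

struct thread_demo {
    unsigned long ksp;
    struct debug_reg_demo debug;    /* was: loose dbcr0/dbcr1/iac* members */
};

/* One struct assignment now snapshots every debug register at once,
 * e.g. when loading guest values while stashing the host's. */
static void swap_debug_state(struct thread_demo *t,
                             const struct debug_reg_demo *guest,
                             struct debug_reg_demo *saved_host)
{
    *saved_host = t->debug;
    t->debug = *guest;
}
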
@@ -40,7 +40,7 @@
 #define _PAGE_U1	0x010000
 #define _PAGE_U0	0x020000
 #define _PAGE_ACCESSED	0x040000
-#define _PAGE_LENDIAN	0x080000
+#define _PAGE_ENDIAN	0x080000
 #define _PAGE_GUARDED	0x100000
 #define _PAGE_COHERENT	0x200000 /* M: enforce memory coherence */
 #define _PAGE_NO_CACHE	0x400000 /* I: cache inhibit */

@@ -243,6 +243,7 @@
 #define SPRN_TBRU	0x10D	/* Time Base Read Upper Register (user, R/O) */
 #define SPRN_TBWL	0x11C	/* Time Base Lower Register (super, R/W) */
 #define SPRN_TBWU	0x11D	/* Time Base Upper Register (super, R/W) */
+#define SPRN_TBU40	0x11E	/* Timebase upper 40 bits (hyper, R/W) */
 #define SPRN_SPURR	0x134	/* Scaled PURR */
 #define SPRN_HSPRG0	0x130	/* Hypervisor Scratch 0 */
 #define SPRN_HSPRG1	0x131	/* Hypervisor Scratch 1 */

@@ -283,6 +284,7 @@
 #define   LPCR_ISL	(1ul << (63-2))
 #define   LPCR_VC_SH	(63-2)
 #define   LPCR_DPFD_SH	(63-11)
+#define   LPCR_DPFD	(7ul << LPCR_DPFD_SH)
 #define   LPCR_VRMASD	(0x1ful << (63-16))
 #define   LPCR_VRMA_L	(1ul << (63-12))
 #define   LPCR_VRMA_LP0	(1ul << (63-15))

@@ -299,6 +301,7 @@
 #define   LPCR_PECE2	0x00001000	/* machine check etc can cause exit */
 #define   LPCR_MER	0x00000800	/* Mediated External Exception */
 #define   LPCR_MER_SH	11
+#define   LPCR_TC	0x00000200	/* Translation control */
 #define   LPCR_LPES	0x0000000c
 #define   LPCR_LPES0	0x00000008	/* LPAR Env selector 0 */
 #define   LPCR_LPES1	0x00000004	/* LPAR Env selector 1 */

@@ -311,6 +314,10 @@
 #define   LPID_RSVD	0x3ff		/* Reserved LPID for partn switching */
 #define	SPRN_HMER	0x150	/* Hardware m? error recovery */
 #define	SPRN_HMEER	0x151	/* Hardware m? enable error recovery */
+#define SPRN_PCR	0x152	/* Processor compatibility register */
+#define   PCR_VEC_DIS	(1ul << (63-0))	/* Vec. disable (bit NA since POWER8) */
+#define   PCR_VSX_DIS	(1ul << (63-1))	/* VSX disable (bit NA since POWER8) */
+#define   PCR_ARCH_205	0x2		/* Architecture 2.05 */
 #define	SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */
 #define SPRN_TLBINDEXR	0x154	/* P7 TLB control register */
 #define SPRN_TLBVPNR	0x155	/* P7 TLB control register */

@@ -420,6 +427,7 @@
 #define	 HID4_RMLS2_SH	 (63 - 2)	/* Real mode limit bottom 2 bits */
 #define	 HID4_LPID5_SH	 (63 - 6)	/* partition ID bottom 4 bits */
 #define	 HID4_RMOR_SH	 (63 - 22)	/* real mode offset (16 bits) */
+#define  HID4_RMOR	 (0xFFFFul << HID4_RMOR_SH)
 #define  HID4_LPES1	 (1 << (63-57))	/* LPAR env. sel. bit 1 */
 #define  HID4_RMLS0_SH	 (63 - 58)	/* Real mode limit top bit */
 #define	 HID4_LPID1_SH	 0		/* partition ID top 2 bits */

@@ -1102,6 +1110,13 @@
 #define PVR_BE		0x0070
 #define PVR_PA6T	0x0090
 
+/* "Logical" PVR values defined in PAPR, representing architecture levels */
+#define PVR_ARCH_204	0x0f000001
+#define PVR_ARCH_205	0x0f000002
+#define PVR_ARCH_206	0x0f000003
+#define PVR_ARCH_206p	0x0f100003
+#define PVR_ARCH_207	0x0f000004
+
 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
 #define mfmsr()		({unsigned long rval; \

@@ -381,7 +381,7 @@
 #define DBCR0_IA34T	0x00004000	/* Instr Addr 3-4 range Toggle */
 #define DBCR0_FT	0x00000001	/* Freeze Timers on debug event */
 
-#define dbcr_iac_range(task)	((task)->thread.dbcr0)
+#define dbcr_iac_range(task)	((task)->thread.debug.dbcr0)
 #define DBCR_IAC12I	DBCR0_IA12			/* Range Inclusive */
 #define DBCR_IAC12X	(DBCR0_IA12 | DBCR0_IA12X)	/* Range Exclusive */
 #define DBCR_IAC12MODE	(DBCR0_IA12 | DBCR0_IA12X)	/* IAC 1-2 Mode Bits */

@@ -395,7 +395,7 @@
 #define DBCR1_DAC1W	0x20000000	/* DAC1 Write Debug Event */
 #define DBCR1_DAC2W	0x10000000	/* DAC2 Write Debug Event */
 
-#define dbcr_dac(task)	((task)->thread.dbcr1)
+#define dbcr_dac(task)	((task)->thread.debug.dbcr1)
 #define DBCR_DAC1R	DBCR1_DAC1R
 #define DBCR_DAC1W	DBCR1_DAC1W
 #define DBCR_DAC2R	DBCR1_DAC2R

@@ -441,7 +441,7 @@
 #define DBCR0_CRET	0x00000020	/* Critical Return Debug Event */
 #define DBCR0_FT	0x00000001	/* Freeze Timers on debug event */
 
-#define dbcr_dac(task)	((task)->thread.dbcr0)
+#define dbcr_dac(task)	((task)->thread.debug.dbcr0)
 #define DBCR_DAC1R	DBCR0_DAC1R
 #define DBCR_DAC1W	DBCR0_DAC1W
 #define DBCR_DAC2R	DBCR0_DAC2R

@@ -475,7 +475,7 @@
 #define DBCR1_IAC34MX	0x000000C0	/* Instr Addr 3-4 range eXclusive */
 #define DBCR1_IAC34AT	0x00000001	/* Instr Addr 3-4 range Toggle */
 
-#define dbcr_iac_range(task)	((task)->thread.dbcr1)
+#define dbcr_iac_range(task)	((task)->thread.debug.dbcr1)
 #define DBCR_IAC12I	DBCR1_IAC12M	/* Range Inclusive */
 #define DBCR_IAC12X	DBCR1_IAC12MX	/* Range Exclusive */
 #define DBCR_IAC12MODE	DBCR1_IAC12MX	/* IAC 1-2 Mode Bits */

@@ -35,6 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
+extern void switch_booke_debug_regs(struct thread_struct *new_thread);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);

@@ -27,6 +27,7 @@
 #define __KVM_HAVE_PPC_SMT
 #define __KVM_HAVE_IRQCHIP
 #define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_GUEST_DEBUG
 
 struct kvm_regs {
 	__u64 pc;

@@ -269,7 +270,24 @@ struct kvm_fpu {
 	__u64 fpr[32];
 };
 
+/*
+ * Defines for h/w breakpoint, watchpoint (read, write or both) and
+ * software breakpoint.
+ * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
+ * for KVM_DEBUG_EXIT.
+ */
+#define KVMPPC_DEBUG_NONE		0x0
+#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
 struct kvm_debug_exit_arch {
+	__u64 address;
+	/*
+	 * exiting to userspace because of h/w breakpoint, watchpoint
+	 * (read, write or both) and software breakpoint.
+	 */
+	__u32 status;
+	__u32 reserved;
 };
 
 /* for KVM_SET_GUEST_DEBUG */

@@ -281,10 +299,6 @@ struct kvm_guest_debug_arch {
 		 * Type denotes h/w breakpoint, read watchpoint, write
 		 * watchpoint or watchpoint (both read and write).
 		 */
-#define KVMPPC_DEBUG_NONE		0x0
-#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
-#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
-#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
 		__u32 type;
 		__u32 reserved;
 	} bp[16];

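With the defines hoisted out of struct kvm_guest_debug_arch, userspace can name breakpoint types before filling bp[]. A rough sketch of arming one hardware breakpoint (illustrative; assumes an open vcpu_fd, with fields following the uapi structs above):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_hw_breakpoint(int vcpu_fd, __u64 guest_addr)
{
    struct kvm_guest_debug dbg;

    memset(&dbg, 0, sizeof(dbg));
    dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    dbg.arch.bp[0].addr = guest_addr;
    dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;    /* from the defines above */

    return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
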
@@ -429,6 +443,11 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
 #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
 #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_MMCR2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
+#define KVM_REG_PPC_MMCRS	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
+#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
+#define KVM_REG_PPC_SIER	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
 
 #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
 #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)

@@ -499,6 +518,65 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TLB3PS	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
 #define KVM_REG_PPC_EPTCFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
 
+/* Timebase offset */
+#define KVM_REG_PPC_TB_OFFSET	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
+
+/* POWER8 registers */
+#define KVM_REG_PPC_SPMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
+#define KVM_REG_PPC_SPMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
+#define KVM_REG_PPC_IAMR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
+#define KVM_REG_PPC_TFHAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
+#define KVM_REG_PPC_TFIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
+#define KVM_REG_PPC_TEXASR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
+#define KVM_REG_PPC_FSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
+#define KVM_REG_PPC_PSPB	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
+#define KVM_REG_PPC_EBBHR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
+#define KVM_REG_PPC_EBBRR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
+#define KVM_REG_PPC_BESCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
+#define KVM_REG_PPC_TAR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
+#define KVM_REG_PPC_DPDES	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
+#define KVM_REG_PPC_DAWR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
+#define KVM_REG_PPC_DAWRX	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
+#define KVM_REG_PPC_CIABR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
+#define KVM_REG_PPC_IC		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
+#define KVM_REG_PPC_VTB		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
+#define KVM_REG_PPC_CSIGR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
+#define KVM_REG_PPC_TACR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
+#define KVM_REG_PPC_TCSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
+#define KVM_REG_PPC_PID		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
+#define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+
+#define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+#define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_PPR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
+
+/* Architecture compatibility level */
+#define KVM_REG_PPC_ARCH_COMPAT	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
+
+/* Transactional Memory checkpointed state:
+ * This is all GPRs, all VSX regs and a subset of SPRs
+ */
+#define KVM_REG_PPC_TM		(KVM_REG_PPC | 0x80000000)
+/* TM GPRs */
+#define KVM_REG_PPC_TM_GPR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_PPC_TM_GPR(n)	(KVM_REG_PPC_TM_GPR0 + (n))
+#define KVM_REG_PPC_TM_GPR31	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
+/* TM VSX */
+#define KVM_REG_PPC_TM_VSR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
+#define KVM_REG_PPC_TM_VSR(n)	(KVM_REG_PPC_TM_VSR0 + (n))
+#define KVM_REG_PPC_TM_VSR63	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
+/* TM SPRS */
+#define KVM_REG_PPC_TM_CR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
+#define KVM_REG_PPC_TM_LR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
+#define KVM_REG_PPC_TM_CTR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
+#define KVM_REG_PPC_TM_FPSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
+#define KVM_REG_PPC_TM_AMR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
+#define KVM_REG_PPC_TM_PPR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
+#define KVM_REG_PPC_TM_VRSAVE	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
+#define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
+#define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
+#define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */

@@ -114,7 +114,7 @@ int main(void)
 #endif /* CONFIG_SPE */
 #endif /* CONFIG_PPC64 */
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-	DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
+	DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, debug.dbcr0));
 #endif
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));

@@ -446,7 +446,7 @@ int main(void)
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
 	DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
 	DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));

@@ -477,7 +477,7 @@ int main(void)
 	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
 
 	/* book3s */
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
 	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));

@@ -509,6 +509,8 @@ int main(void)
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
+	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));

@@ -518,18 +520,22 @@ int main(void)
 	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
 	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
 	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
+	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
+	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
 	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
 	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
-	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
-			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
+	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
+	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
+	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
 	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
 	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
 	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
 
 #ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
 # define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
 #else
 # define SVCPU_FIELD(x, f)

@@ -581,7 +587,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
 	HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
 	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);

@@ -597,10 +603,11 @@ int main(void)
 	HSTATE_FIELD(HSTATE_DABR, dabr);
 	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
 	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #ifdef CONFIG_PPC_BOOK3S_64
 	HSTATE_FIELD(HSTATE_CFAR, cfar);
+	HSTATE_FIELD(HSTATE_PPR, ppr);
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #else /* CONFIG_PPC_BOOK3S */

@@ -126,7 +126,7 @@ BEGIN_FTR_SECTION
 	bgt	cr1,.
 	GET_PACA(r13)
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	li	r0,KVM_HWTHREAD_IN_KERNEL
 	stb	r0,HSTATE_HWTHREAD_STATE(r13)
 	/* Order setting hwthread_state vs. testing hwthread_req */

@@ -425,7 +425,7 @@ data_access_check_stab:
 	mfspr	r9,SPRN_DSISR
 	srdi	r10,r10,60
 	rlwimi	r10,r9,16,0x20
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	lbz	r9,HSTATE_IN_GUEST(r13)
 	rlwimi	r10,r9,8,0x300
 #endif

@@ -650,6 +650,32 @@ slb_miss_user_pseries:
 	b	.				/* prevent spec. execution */
 #endif /* __DISABLED__ */
 
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+kvmppc_skip_interrupt:
+	/*
+	 * Here all GPRs are unchanged from when the interrupt happened
+	 * except for r13, which is saved in SPRG_SCRATCH0.
+	 */
+	mfspr	r13, SPRN_SRR0
+	addi	r13, r13, 4
+	mtspr	SPRN_SRR0, r13
+	GET_SCRATCH0(r13)
+	rfid
+	b	.
+
+kvmppc_skip_Hinterrupt:
+	/*
+	 * Here all GPRs are unchanged from when the interrupt happened
+	 * except for r13, which is saved in SPRG_SCRATCH0.
+	 */
+	mfspr	r13, SPRN_HSRR0
+	addi	r13, r13, 4
+	mtspr	SPRN_HSRR0, r13
+	GET_SCRATCH0(r13)
+	hrfid
+	b	.
+#endif
+
 /*
  * Code from here down to __end_handlers is invoked from the
  * exception prologs above.  Because the prologs assemble the

@@ -84,7 +84,7 @@ _GLOBAL(power7_nap)
 	std	r9,_MSR(r1)
 	std	r1,PACAR1(r13)
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	/* Tell KVM we're napping */
 	li	r4,KVM_HWTHREAD_IN_NAP
 	stb	r4,HSTATE_HWTHREAD_STATE(r13)

@@ -314,28 +314,28 @@ static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
  */
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
-	thread->iac1 = thread->iac2 = 0;
+	thread->debug.iac1 = thread->debug.iac2 = 0;
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	thread->iac3 = thread->iac4 = 0;
+	thread->debug.iac3 = thread->debug.iac4 = 0;
 #endif
-	thread->dac1 = thread->dac2 = 0;
+	thread->debug.dac1 = thread->debug.dac2 = 0;
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	thread->dvc1 = thread->dvc2 = 0;
+	thread->debug.dvc1 = thread->debug.dvc2 = 0;
 #endif
-	thread->dbcr0 = 0;
+	thread->debug.dbcr0 = 0;
 #ifdef CONFIG_BOOKE
 	/*
 	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
 	 */
-	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | \
+	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
 			DBCR1_IAC3US | DBCR1_IAC4US;
 	/*
 	 * Force Data Address Compare User/Supervisor bits to be User-only
 	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
 	 */
-	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
+	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
 #else
-	thread->dbcr1 = 0;
+	thread->debug.dbcr1 = 0;
 #endif
 }
 

@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 	 */
 	mtmsr(mfmsr() & ~MSR_DE);
 
-	mtspr(SPRN_IAC1, thread->iac1);
-	mtspr(SPRN_IAC2, thread->iac2);
+	mtspr(SPRN_IAC1, thread->debug.iac1);
+	mtspr(SPRN_IAC2, thread->debug.iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	mtspr(SPRN_IAC3, thread->iac3);
-	mtspr(SPRN_IAC4, thread->iac4);
+	mtspr(SPRN_IAC3, thread->debug.iac3);
+	mtspr(SPRN_IAC4, thread->debug.iac4);
 #endif
-	mtspr(SPRN_DAC1, thread->dac1);
-	mtspr(SPRN_DAC2, thread->dac2);
+	mtspr(SPRN_DAC1, thread->debug.dac1);
+	mtspr(SPRN_DAC2, thread->debug.dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	mtspr(SPRN_DVC1, thread->dvc1);
-	mtspr(SPRN_DVC2, thread->dvc2);
+	mtspr(SPRN_DVC1, thread->debug.dvc1);
+	mtspr(SPRN_DVC2, thread->debug.dvc2);
 #endif
-	mtspr(SPRN_DBCR0, thread->dbcr0);
-	mtspr(SPRN_DBCR1, thread->dbcr1);
+	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
+	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
 #ifdef CONFIG_BOOKE
-	mtspr(SPRN_DBCR2, thread->dbcr2);
+	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
 #endif
 }
 /*

@ -371,12 +371,13 @@ static void prime_debug_regs(struct thread_struct *thread)
|
|||
* debug registers, set the debug registers from the values
|
||||
* stored in the new thread.
|
||||
*/
|
||||
static void switch_booke_debug_regs(struct thread_struct *new_thread)
|
||||
void switch_booke_debug_regs(struct thread_struct *new_thread)
|
||||
{
|
||||
if ((current->thread.dbcr0 & DBCR0_IDM)
|
||||
|| (new_thread->dbcr0 & DBCR0_IDM))
|
||||
if ((current->thread.debug.dbcr0 & DBCR0_IDM)
|
||||
|| (new_thread->debug.dbcr0 & DBCR0_IDM))
|
||||
prime_debug_regs(new_thread);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
|
||||
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
|
||||
#ifndef CONFIG_HAVE_HW_BREAKPOINT
|
||||
static void set_debug_reg_defaults(struct thread_struct *thread)
|
||||
|
|
|
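Nearly every hunk in this file is the same mechanical rename: a flat thread_struct field such as thread->dbcr0 becomes thread->debug.dbcr0. A minimal sketch of the underlying refactoring, with stand-in types (the kernel's real structure has more fields and lives in the processor.h header whose merge conflict this commit resolves):

    /* Stand-in types for illustration, not the kernel's definitions. */
    struct debug_reg_sketch {
            unsigned long dbcr0, dbcr1, dbcr2;
            unsigned long iac1, iac2;
            unsigned long dac1, dac2;
    };

    struct thread_struct_sketch {
            /* before: unsigned long dbcr0; unsigned long iac1; ... */
            struct debug_reg_sketch debug; /* after: thread->debug.dbcr0 */
    };

Grouping the state into one structure lets the same type be embedded in KVM's per-vcpu state and saved or restored as a unit, which is what the KVM-side patches in this merge rely on.
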
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -854,8 +854,8 @@ void user_enable_single_step(struct task_struct *task)

 	if (regs != NULL) {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-		task->thread.dbcr0 &= ~DBCR0_BT;
-		task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+		task->thread.debug.dbcr0 &= ~DBCR0_BT;
+		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
 		regs->msr |= MSR_DE;
 #else
 		regs->msr &= ~MSR_BE;
@@ -871,8 +871,8 @@ void user_enable_block_step(struct task_struct *task)

 	if (regs != NULL) {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-		task->thread.dbcr0 &= ~DBCR0_IC;
-		task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
+		task->thread.debug.dbcr0 &= ~DBCR0_IC;
+		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
 		regs->msr |= MSR_DE;
 #else
 		regs->msr &= ~MSR_SE;
@@ -894,16 +894,16 @@ void user_disable_single_step(struct task_struct *task)
 	 * And, after doing so, if all debug flags are off, turn
 	 * off DBCR0(IDM) and MSR(DE) .... Torez
 	 */
-	task->thread.dbcr0 &= ~DBCR0_IC;
+	task->thread.debug.dbcr0 &= ~DBCR0_IC;
 	/*
 	 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
 	 */
-	if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
-				task->thread.dbcr1)) {
+	if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
+				task->thread.debug.dbcr1)) {
 		/*
 		 * All debug events were off.....
 		 */
-		task->thread.dbcr0 &= ~DBCR0_IDM;
+		task->thread.debug.dbcr0 &= ~DBCR0_IDM;
 		regs->msr &= ~MSR_DE;
 	}
 #else
@@ -1022,14 +1022,14 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
 	 */

 	/* DAC's hold the whole address without any mode flags */
-	task->thread.dac1 = data & ~0x3UL;
+	task->thread.debug.dac1 = data & ~0x3UL;

-	if (task->thread.dac1 == 0) {
+	if (task->thread.debug.dac1 == 0) {
 		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
-		if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
-					task->thread.dbcr1)) {
+		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
+					task->thread.debug.dbcr1)) {
 			task->thread.regs->msr &= ~MSR_DE;
-			task->thread.dbcr0 &= ~DBCR0_IDM;
+			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
 		}
 		return 0;
 	}
@@ -1041,7 +1041,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,

 	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
 	   register */
-	task->thread.dbcr0 |= DBCR0_IDM;
+	task->thread.debug.dbcr0 |= DBCR0_IDM;

 	/* Check for write and read flags and set DBCR0
 	   accordingly */
@@ -1071,10 +1071,10 @@ static long set_instruction_bp(struct task_struct *child,
 			       struct ppc_hw_breakpoint *bp_info)
 {
 	int slot;
-	int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0);
-	int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0);
-	int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0);
-	int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0);
+	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
+	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
+	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
+	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);

 	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
 		slot2_in_use = 1;
@@ -1093,9 +1093,9 @@ static long set_instruction_bp(struct task_struct *child,
 		/* We need a pair of IAC regsisters */
 		if ((!slot1_in_use) && (!slot2_in_use)) {
 			slot = 1;
-			child->thread.iac1 = bp_info->addr;
-			child->thread.iac2 = bp_info->addr2;
-			child->thread.dbcr0 |= DBCR0_IAC1;
+			child->thread.debug.iac1 = bp_info->addr;
+			child->thread.debug.iac2 = bp_info->addr2;
+			child->thread.debug.dbcr0 |= DBCR0_IAC1;
 			if (bp_info->addr_mode ==
 					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
 				dbcr_iac_range(child) |= DBCR_IAC12X;
@@ -1104,9 +1104,9 @@ static long set_instruction_bp(struct task_struct *child,
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
 		} else if ((!slot3_in_use) && (!slot4_in_use)) {
 			slot = 3;
-			child->thread.iac3 = bp_info->addr;
-			child->thread.iac4 = bp_info->addr2;
-			child->thread.dbcr0 |= DBCR0_IAC3;
+			child->thread.debug.iac3 = bp_info->addr;
+			child->thread.debug.iac4 = bp_info->addr2;
+			child->thread.debug.dbcr0 |= DBCR0_IAC3;
 			if (bp_info->addr_mode ==
 					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
 				dbcr_iac_range(child) |= DBCR_IAC34X;
@@ -1126,30 +1126,30 @@ static long set_instruction_bp(struct task_struct *child,
 		 */
 		if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
 			slot = 1;
-			child->thread.iac1 = bp_info->addr;
-			child->thread.dbcr0 |= DBCR0_IAC1;
+			child->thread.debug.iac1 = bp_info->addr;
+			child->thread.debug.dbcr0 |= DBCR0_IAC1;
 			goto out;
 		}
 	}
 	if (!slot2_in_use) {
 		slot = 2;
-		child->thread.iac2 = bp_info->addr;
-		child->thread.dbcr0 |= DBCR0_IAC2;
+		child->thread.debug.iac2 = bp_info->addr;
+		child->thread.debug.dbcr0 |= DBCR0_IAC2;
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
 	} else if (!slot3_in_use) {
 		slot = 3;
-		child->thread.iac3 = bp_info->addr;
-		child->thread.dbcr0 |= DBCR0_IAC3;
+		child->thread.debug.iac3 = bp_info->addr;
+		child->thread.debug.dbcr0 |= DBCR0_IAC3;
 	} else if (!slot4_in_use) {
 		slot = 4;
-		child->thread.iac4 = bp_info->addr;
-		child->thread.dbcr0 |= DBCR0_IAC4;
+		child->thread.debug.iac4 = bp_info->addr;
+		child->thread.debug.dbcr0 |= DBCR0_IAC4;
 #endif
 	} else
 		return -ENOSPC;
 	}
 out:
-	child->thread.dbcr0 |= DBCR0_IDM;
+	child->thread.debug.dbcr0 |= DBCR0_IDM;
 	child->thread.regs->msr |= MSR_DE;

 	return slot;
@@ -1159,49 +1159,49 @@ static int del_instruction_bp(struct task_struct *child, int slot)
 {
 	switch (slot) {
 	case 1:
-		if ((child->thread.dbcr0 & DBCR0_IAC1) == 0)
+		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
 			return -ENOENT;

 		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
 			/* address range - clear slots 1 & 2 */
-			child->thread.iac2 = 0;
+			child->thread.debug.iac2 = 0;
 			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
 		}
-		child->thread.iac1 = 0;
-		child->thread.dbcr0 &= ~DBCR0_IAC1;
+		child->thread.debug.iac1 = 0;
+		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
 		break;
 	case 2:
-		if ((child->thread.dbcr0 & DBCR0_IAC2) == 0)
+		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
 			return -ENOENT;

 		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
 			/* used in a range */
 			return -EINVAL;
-		child->thread.iac2 = 0;
-		child->thread.dbcr0 &= ~DBCR0_IAC2;
+		child->thread.debug.iac2 = 0;
+		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
 		break;
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
 	case 3:
-		if ((child->thread.dbcr0 & DBCR0_IAC3) == 0)
+		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
 			return -ENOENT;

 		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
 			/* address range - clear slots 3 & 4 */
-			child->thread.iac4 = 0;
+			child->thread.debug.iac4 = 0;
 			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
 		}
-		child->thread.iac3 = 0;
-		child->thread.dbcr0 &= ~DBCR0_IAC3;
+		child->thread.debug.iac3 = 0;
+		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
 		break;
 	case 4:
-		if ((child->thread.dbcr0 & DBCR0_IAC4) == 0)
+		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
 			return -ENOENT;

 		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
 			/* Used in a range */
 			return -EINVAL;
-		child->thread.iac4 = 0;
-		child->thread.dbcr0 &= ~DBCR0_IAC4;
+		child->thread.debug.iac4 = 0;
+		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
 		break;
 #endif
 	default:
@@ -1231,18 +1231,18 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
 			dbcr_dac(child) |= DBCR_DAC1R;
 		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
 			dbcr_dac(child) |= DBCR_DAC1W;
-		child->thread.dac1 = (unsigned long)bp_info->addr;
+		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
 		if (byte_enable) {
-			child->thread.dvc1 =
+			child->thread.debug.dvc1 =
 				(unsigned long)bp_info->condition_value;
-			child->thread.dbcr2 |=
+			child->thread.debug.dbcr2 |=
 				((byte_enable << DBCR2_DVC1BE_SHIFT) |
 				 (condition_mode << DBCR2_DVC1M_SHIFT));
 		}
 #endif
 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
-	} else if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
+	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
 		/* Both dac1 and dac2 are part of a range */
 		return -ENOSPC;
 #endif
@@ -1252,19 +1252,19 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
 			dbcr_dac(child) |= DBCR_DAC2R;
 		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
 			dbcr_dac(child) |= DBCR_DAC2W;
-		child->thread.dac2 = (unsigned long)bp_info->addr;
+		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
 		if (byte_enable) {
-			child->thread.dvc2 =
+			child->thread.debug.dvc2 =
 				(unsigned long)bp_info->condition_value;
-			child->thread.dbcr2 |=
+			child->thread.debug.dbcr2 |=
 				((byte_enable << DBCR2_DVC2BE_SHIFT) |
 				 (condition_mode << DBCR2_DVC2M_SHIFT));
 		}
 #endif
 	} else
 		return -ENOSPC;
-	child->thread.dbcr0 |= DBCR0_IDM;
+	child->thread.debug.dbcr0 |= DBCR0_IDM;
 	child->thread.regs->msr |= MSR_DE;

 	return slot + 4;
@@ -1276,32 +1276,32 @@ static int del_dac(struct task_struct *child, int slot)
 		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
 			return -ENOENT;

-		child->thread.dac1 = 0;
+		child->thread.debug.dac1 = 0;
 		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
-		if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
-			child->thread.dac2 = 0;
-			child->thread.dbcr2 &= ~DBCR2_DAC12MODE;
+		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
+			child->thread.debug.dac2 = 0;
+			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
 		}
-		child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
+		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
 #endif
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-		child->thread.dvc1 = 0;
+		child->thread.debug.dvc1 = 0;
 #endif
 	} else if (slot == 2) {
 		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
 			return -ENOENT;

 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
-		if (child->thread.dbcr2 & DBCR2_DAC12MODE)
+		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
 			/* Part of a range */
 			return -EINVAL;
-		child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
+		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
 #endif
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-		child->thread.dvc2 = 0;
+		child->thread.debug.dvc2 = 0;
 #endif
-		child->thread.dac2 = 0;
+		child->thread.debug.dac2 = 0;
 		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
 	} else
 		return -EINVAL;
@@ -1343,22 +1343,22 @@ static int set_dac_range(struct task_struct *child,
 			return -EIO;
 	}

-	if (child->thread.dbcr0 &
+	if (child->thread.debug.dbcr0 &
 	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
 		return -ENOSPC;

 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
-		child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
+		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
 	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
-		child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
-	child->thread.dac1 = bp_info->addr;
-	child->thread.dac2 = bp_info->addr2;
+		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
+	child->thread.debug.dac1 = bp_info->addr;
+	child->thread.debug.dac2 = bp_info->addr2;
 	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
-		child->thread.dbcr2 |= DBCR2_DAC12M;
+		child->thread.debug.dbcr2 |= DBCR2_DAC12M;
 	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
-		child->thread.dbcr2 |= DBCR2_DAC12MX;
+		child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
 	else	/* PPC_BREAKPOINT_MODE_MASK */
-		child->thread.dbcr2 |= DBCR2_DAC12MM;
+		child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
 	child->thread.regs->msr |= MSR_DE;

 	return 5;
@@ -1489,9 +1489,9 @@ static long ppc_del_hwdebug(struct task_struct *child, long data)
 		rc = del_dac(child, (int)data - 4);

 	if (!rc) {
-		if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0,
-					child->thread.dbcr1)) {
-			child->thread.dbcr0 &= ~DBCR0_IDM;
+		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
+					child->thread.debug.dbcr1)) {
+			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
 			child->thread.regs->msr &= ~MSR_DE;
 		}
 	}
@@ -1669,7 +1669,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		if (addr > 0)
 			break;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-		ret = put_user(child->thread.dac1, datalp);
+		ret = put_user(child->thread.debug.dac1, datalp);
 #else
 		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
 			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));

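All of the ptrace hunks above toggle the same trio of state: one DBCR0 event bit, the DBCR0 internal-debug-mode bit (IDM), and MSR[DE]. A condensed sketch of that enable/disable pairing; the bit values below are placeholders, the real DBCR0_* and MSR_DE constants come from the asm headers:

    /* Placeholder bit values, for illustration only. */
    #define SK_DBCR0_IC   0x1UL /* single-step event */
    #define SK_DBCR0_IDM  0x2UL /* internal debug mode */
    #define SK_MSR_DE     0x4UL /* debug interrupts enabled */

    static void sketch_enable_single_step(unsigned long *dbcr0,
                                          unsigned long *msr)
    {
            *dbcr0 |= SK_DBCR0_IDM | SK_DBCR0_IC;
            *msr |= SK_MSR_DE;
    }

    static void sketch_disable_single_step(unsigned long *dbcr0,
                                           unsigned long *msr)
    {
            *dbcr0 &= ~SK_DBCR0_IC;
            if (!(*dbcr0 & ~SK_DBCR0_IDM)) { /* no other events armed */
                    *dbcr0 &= ~SK_DBCR0_IDM;
                    *msr &= ~SK_MSR_DE;
            }
    }
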
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -269,7 +269,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		if (addr > 0)
 			break;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-		ret = put_user(child->thread.dac1, (u32 __user *)data);
+		ret = put_user(child->thread.debug.dac1, (u32 __user *)data);
 #else
 		dabr_fake = (
 			(child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |

--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1309,7 +1309,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 	unsigned char tmp;
 	unsigned long new_msr = regs->msr;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	unsigned long new_dbcr0 = current->thread.dbcr0;
+	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
 #endif

 	for (i=0; i<ndbg; i++) {
@@ -1324,7 +1324,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 		} else {
 			new_dbcr0 &= ~DBCR0_IC;
 			if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
-					current->thread.dbcr1)) {
+					current->thread.debug.dbcr1)) {
 				new_msr &= ~MSR_DE;
 				new_dbcr0 &= ~DBCR0_IDM;
 			}
@@ -1359,7 +1359,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 	   the user is really doing something wrong. */
 	regs->msr = new_msr;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	current->thread.dbcr0 = new_dbcr0;
+	current->thread.debug.dbcr0 = new_dbcr0;
 #endif

 	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))

--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -351,8 +351,8 @@ static inline int check_io_access(struct pt_regs *regs)
 #define REASON_TRAP		ESR_PTR

 /* single-step stuff */
-#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
-#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)
+#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
+#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

 #else
 /* On non-4xx, the reason for the machine check or program
@@ -1486,7 +1486,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
 	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
 		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
-		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
+		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
 #endif
 		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
 			     5);
@@ -1497,24 +1497,24 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
 			     6);
 		changed |= 0x01;
 	} else if (debug_status & DBSR_IAC1) {
-		current->thread.dbcr0 &= ~DBCR0_IAC1;
+		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
 		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
 		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
 			     1);
 		changed |= 0x01;
 	} else if (debug_status & DBSR_IAC2) {
-		current->thread.dbcr0 &= ~DBCR0_IAC2;
+		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
 		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
 			     2);
 		changed |= 0x01;
 	} else if (debug_status & DBSR_IAC3) {
-		current->thread.dbcr0 &= ~DBCR0_IAC3;
+		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
 		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
 		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
 			     3);
 		changed |= 0x01;
 	} else if (debug_status & DBSR_IAC4) {
-		current->thread.dbcr0 &= ~DBCR0_IAC4;
+		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
 		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
 			     4);
 		changed |= 0x01;
@@ -1524,19 +1524,20 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
 	 * Check all other debug flags and see if that bit needs to be turned
 	 * back on or not.
 	 */
-	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
+	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
+			       current->thread.debug.dbcr1))
 		regs->msr |= MSR_DE;
 	else
 		/* Make sure the IDM flag is off */
-		current->thread.dbcr0 &= ~DBCR0_IDM;
+		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

 	if (changed & 0x01)
-		mtspr(SPRN_DBCR0, current->thread.dbcr0);
+		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
 }

 void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 {
-	current->thread.dbsr = debug_status;
+	current->thread.debug.dbsr = debug_status;

 	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
 	 * on server, it stops on the target of the branch. In order to simulate
@@ -1553,8 +1554,8 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)

 		/* Do the single step trick only when coming from userspace */
 		if (user_mode(regs)) {
-			current->thread.dbcr0 &= ~DBCR0_BT;
-			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+			current->thread.debug.dbcr0 &= ~DBCR0_BT;
+			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
 			regs->msr |= MSR_DE;
 			return;
 		}
@@ -1582,13 +1583,13 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
 			return;

 		if (user_mode(regs)) {
-			current->thread.dbcr0 &= ~DBCR0_IC;
-			if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
-					       current->thread.dbcr1))
+			current->thread.debug.dbcr0 &= ~DBCR0_IC;
+			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
+					       current->thread.debug.dbcr1))
 				regs->msr |= MSR_DE;
 			else
 				/* Make sure the IDM bit is off */
-				current->thread.dbcr0 &= ~DBCR0_IDM;
+				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
 		}

 		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -31,13 +31,13 @@
 #include "44x_tlb.h"
 #include "booke.h"

-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
 {
 	kvmppc_booke_vcpu_load(vcpu, cpu);
 	kvmppc_44x_tlb_load(vcpu);
 }

-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
 {
 	kvmppc_44x_tlb_put(vcpu);
 	kvmppc_booke_vcpu_put(vcpu);
@@ -114,29 +114,32 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 	return 0;
 }

-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
+				     struct kvm_sregs *sregs)
 {
-	kvmppc_get_sregs_ivor(vcpu, sregs);
+	return kvmppc_get_sregs_ivor(vcpu, sregs);
 }

-int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
+				     struct kvm_sregs *sregs)
 {
 	return kvmppc_set_sregs_ivor(vcpu, sregs);
 }

-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
-		       union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
+				  union kvmppc_one_reg *val)
 {
 	return -EINVAL;
 }

-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
-		       union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
+				  union kvmppc_one_reg *val)
 {
 	return -EINVAL;
 }

-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
+						    unsigned int id)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x;
 	struct kvm_vcpu *vcpu;
@@ -167,7 +170,7 @@ out:
 	return ERR_PTR(err);
 }

-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);

@@ -176,28 +179,53 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 	kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
 }

-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_44x(struct kvm *kvm)
 {
 	return 0;
 }

-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
 {
 }

+static struct kvmppc_ops kvm_ops_44x = {
+	.get_sregs = kvmppc_core_get_sregs_44x,
+	.set_sregs = kvmppc_core_set_sregs_44x,
+	.get_one_reg = kvmppc_get_one_reg_44x,
+	.set_one_reg = kvmppc_set_one_reg_44x,
+	.vcpu_load   = kvmppc_core_vcpu_load_44x,
+	.vcpu_put    = kvmppc_core_vcpu_put_44x,
+	.vcpu_create = kvmppc_core_vcpu_create_44x,
+	.vcpu_free   = kvmppc_core_vcpu_free_44x,
+	.mmu_destroy  = kvmppc_mmu_destroy_44x,
+	.init_vm = kvmppc_core_init_vm_44x,
+	.destroy_vm = kvmppc_core_destroy_vm_44x,
+	.emulate_op = kvmppc_core_emulate_op_44x,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
+};
+
 static int __init kvmppc_44x_init(void)
 {
 	int r;

 	r = kvmppc_booke_init();
 	if (r)
-		return r;
+		goto err_out;

-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
+	if (r)
+		goto err_out;
+	kvm_ops_44x.owner = THIS_MODULE;
+	kvmppc_pr_ops = &kvm_ops_44x;
+
+err_out:
+	return r;
 }

 static void __exit kvmppc_44x_exit(void)
 {
+	kvmppc_pr_ops = NULL;
 	kvmppc_booke_exit();
 }

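The new kvm_ops_44x table and the reworked module init above are the heart of this series: each core flavour now publishes its callbacks through a struct of function pointers that common code dispatches through, instead of relying on link-time symbol resolution. A stripped-down sketch of that registration pattern, with invented miniature types rather than the kernel's kvmppc_ops:

    #include <stddef.h>

    /* Invented miniature of the kvmppc_ops idea, not the kernel's struct. */
    struct mini_ops {
            const char *name;
            int (*init_vm)(void);
    };

    static struct mini_ops *mini_pr_ops; /* stands in for kvmppc_pr_ops */

    static int mini_init_vm_44x(void)
    {
            return 0;
    }

    static struct mini_ops mini_ops_44x = {
            .name    = "44x",
            .init_vm = mini_init_vm_44x,
    };

    /* Module load publishes the table; unload withdraws it. */
    static int mini_44x_init(void)
    {
            mini_pr_ops = &mini_ops_44x;
            return 0;
    }

    static void mini_44x_exit(void)
    {
            mini_pr_ops = NULL;
    }

In the real code, setting .owner = THIS_MODULE additionally lets the core take a module reference so the providing module cannot be unloaded while a VM is using the table.
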
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -91,8 +91,8 @@ static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
 	return EMULATE_DONE;
 }

-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			   unsigned int inst, int *advance)
+int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			       unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
 	int dcrn = get_dcrn(inst);
@@ -152,7 +152,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return emulated;
 }

-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 {
 	int emulated = EMULATE_DONE;

@@ -172,7 +172,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 	return emulated;
 }

-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 {
 	int emulated = EMULATE_DONE;

--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -268,7 +268,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
 	trace_kvm_stlb_inval(stlb_index);
 }

-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	int i;

--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -34,17 +34,20 @@ config KVM_BOOK3S_64_HANDLER
 	bool
 	select KVM_BOOK3S_HANDLER

-config KVM_BOOK3S_PR
+config KVM_BOOK3S_PR_POSSIBLE
 	bool
 	select KVM_MMIO
 	select MMU_NOTIFIER

+config KVM_BOOK3S_HV_POSSIBLE
+	bool
+
 config KVM_BOOK3S_32
 	tristate "KVM support for PowerPC book3s_32 processors"
 	depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
 	select KVM
 	select KVM_BOOK3S_32_HANDLER
-	select KVM_BOOK3S_PR
+	select KVM_BOOK3S_PR_POSSIBLE
 	---help---
 	  Support running unmodified book3s_32 guest kernels
 	  in virtual machines on book3s_32 host processors.
@@ -59,6 +62,7 @@ config KVM_BOOK3S_64
 	depends on PPC_BOOK3S_64
 	select KVM_BOOK3S_64_HANDLER
 	select KVM
+	select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
 	---help---
 	  Support running unmodified book3s_64 and book3s_32 guest kernels
 	  in virtual machines on book3s_64 host processors.
@@ -69,8 +73,9 @@ config KVM_BOOK3S_64
 	  If unsure, say N.

 config KVM_BOOK3S_64_HV
-	bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+	tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
 	depends on KVM_BOOK3S_64
+	select KVM_BOOK3S_HV_POSSIBLE
 	select MMU_NOTIFIER
 	select CMA
 	---help---
@@ -89,9 +94,20 @@ config KVM_BOOK3S_64_HV
 	  If unsure, say N.

 config KVM_BOOK3S_64_PR
-	def_bool y
-	depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
-	select KVM_BOOK3S_PR
+	tristate "KVM support without using hypervisor mode in host"
+	depends on KVM_BOOK3S_64
+	select KVM_BOOK3S_PR_POSSIBLE
+	---help---
+	  Support running guest kernels in virtual machines on processors
+	  without using hypervisor mode in the host, by running the
+	  guest in user mode (problem state) and emulating all
+	  privileged instructions and registers.
+
+	  This is not as fast as using hypervisor mode, but works on
+	  machines where hypervisor mode is not available or not usable,
+	  and can emulate processors that are different from the host
+	  processor, including emulating 32-bit processors on a 64-bit
+	  host.

 config KVM_BOOKE_HV
 	bool

--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -53,41 +53,51 @@ kvm-e500mc-objs := \
 	e500_emulate.o
 kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)

-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
-	$(KVM)/coalesced_mmio.o \
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
+	book3s_64_vio_hv.o
+
+kvm-pr-y := \
 	fpu.o \
 	book3s_paired_singles.o \
 	book3s_pr.o \
 	book3s_pr_papr.o \
-	book3s_64_vio_hv.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
 	book3s_mmu_hpte.o \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
-	book3s_rmhandlers.o

-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+kvm-book3s_64-module-objs := \
+	$(KVM)/coalesced_mmio.o
+
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+	book3s_rmhandlers.o
+endif
+
+kvm-hv-y += \
 	book3s_hv.o \
 	book3s_hv_interrupts.o \
 	book3s_64_mmu_hv.o

 kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
 	book3s_hv_rm_xics.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+
+ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_hv_rmhandlers.o \
 	book3s_hv_rm_mmu.o \
 	book3s_64_vio_hv.o \
 	book3s_hv_ras.o \
 	book3s_hv_builtin.o \
 	book3s_hv_cma.o \
 	$(kvm-book3s_64-builtin-xics-objs-y)
+endif

 kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
 	book3s_xics.o

-kvm-book3s_64-module-objs := \
+kvm-book3s_64-module-objs += \
 	$(KVM)/kvm_main.o \
 	$(KVM)/eventfd.o \
 	powerpc.o \
@@ -123,4 +133,7 @@ obj-$(CONFIG_KVM_E500MC) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o

+obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o
+obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o
+
 obj-y += $(kvm-book3s_64-builtin-objs-y)

--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -34,6 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>

+#include "book3s.h"
 #include "trace.h"

 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -69,6 +70,50 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 {
 }

+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+	if (!is_kvmppc_hv_enabled(vcpu->kvm))
+		return to_book3s(vcpu)->hior;
+	return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+			unsigned long pending_now, unsigned long old_pending)
+{
+	if (is_kvmppc_hv_enabled(vcpu->kvm))
+		return;
+	if (pending_now)
+		vcpu->arch.shared->int_pending = 1;
+	else if (old_pending)
+		vcpu->arch.shared->int_pending = 0;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+	ulong crit_raw;
+	ulong crit_r1;
+	bool crit;
+
+	if (is_kvmppc_hv_enabled(vcpu->kvm))
+		return false;
+
+	crit_raw = vcpu->arch.shared->critical;
+	crit_r1 = kvmppc_get_gpr(vcpu, 1);
+
+	/* Truncate crit indicators in 32 bit mode */
+	if (!(vcpu->arch.shared->msr & MSR_SF)) {
+		crit_raw &= 0xffffffff;
+		crit_r1 &= 0xffffffff;
+	}
+
+	/* Critical section when crit == r1 */
+	crit = (crit_raw == crit_r1);
+	/* ... and we're in supervisor mode */
+	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+	return crit;
+}
+
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
 	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
@@ -126,28 +171,32 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 	printk(KERN_INFO "Queueing interrupt %x\n", vec);
 #endif
 }
+EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
 	/* might as well deliver this straight away */
 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
 {
 	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				struct kvm_interrupt *irq)
@@ -285,8 +334,10 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)

 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

-pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;

@@ -302,20 +353,23 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)

 		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
 		get_page(pfn_to_page(pfn));
+		if (writable)
+			*writable = true;
 		return pfn;
 	}

-	return gfn_to_pfn(vcpu->kvm, gfn);
+	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
 }
+EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);

 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
-			struct kvmppc_pte *pte)
+			bool iswrite, struct kvmppc_pte *pte)
 {
 	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
 	int r;

 	if (relocated) {
-		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
+		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
 	} else {
 		pte->eaddr = eaddr;
 		pte->raddr = eaddr & KVM_PAM;
@@ -361,7 +415,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,

 	vcpu->stat.st++;

-	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
 		return -ENOENT;

 	*eaddr = pte.raddr;
@@ -374,6 +428,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,

 	return EMULATE_DONE;
 }
+EXPORT_SYMBOL_GPL(kvmppc_st);

 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 	      bool data)
@@ -383,7 +438,7 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,

 	vcpu->stat.ld++;

-	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
+	if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
 		goto nopte;

 	*eaddr = pte.raddr;
@@ -404,6 +459,7 @@ nopte:
 mmio:
 	return EMULATE_DO_MMIO;
 }
+EXPORT_SYMBOL_GPL(kvmppc_ld);

 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
@@ -419,6 +475,18 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 }

+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
+}
+
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
@@ -495,8 +563,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	if (size > sizeof(val))
 		return -EINVAL;

-	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
-
+	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -528,6 +595,9 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		}
 		val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
 		break;
+	case KVM_REG_PPC_VRSAVE:
+		val = get_reg_val(reg->id, vcpu->arch.vrsave);
+		break;
 #endif /* CONFIG_ALTIVEC */
 	case KVM_REG_PPC_DEBUG_INST: {
 		u32 opcode = INS_TW;
@@ -572,8 +642,7 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
 		return -EFAULT;

-	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
-
+	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
 	if (r == -EINVAL) {
 		r = 0;
 		switch (reg->id) {
@@ -605,6 +674,13 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		}
 		vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
 		break;
+	case KVM_REG_PPC_VRSAVE:
+		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+			r = -ENXIO;
+			break;
+		}
+		vcpu->arch.vrsave = set_reg_val(reg->id, val);
+		break;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_KVM_XICS
 	case KVM_REG_PPC_ICP_STATE:
@@ -625,6 +701,27 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	return r;
 }

+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
+}
+EXPORT_SYMBOL_GPL(kvmppc_set_msr);
+
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
+}
+
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 				  struct kvm_translation *tr)
 {
@@ -644,3 +741,141 @@ void kvmppc_decrementer_func(unsigned long data)
 	kvmppc_core_queue_dec(vcpu);
 	kvm_vcpu_kick(vcpu);
 }
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
+}
+
+int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
+}
+
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+			      struct kvm_memory_slot *dont)
+{
+	kvm->arch.kvm_ops->free_memslot(free, dont);
+}
+
+int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			       unsigned long npages)
+{
+	return kvm->arch.kvm_ops->create_memslot(slot, npages);
+}
+
+void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				struct kvm_userspace_memory_region *mem)
+{
+	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old)
+{
+	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_hva);
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->age_hva(kvm, hva);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+
+#ifdef CONFIG_PPC64
+	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+#endif
+
+	return kvm->arch.kvm_ops->init_vm(kvm);
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+	kvm->arch.kvm_ops->destroy_vm(kvm);
+
+#ifdef CONFIG_PPC64
+	kvmppc_rtas_tokens_free(kvm);
+	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+	/*
+	 * We always return 0 for book3s. We check
+	 * for compatability while loading the HV
+	 * or PR module
+	 */
+	return 0;
+}
+
+static int kvmppc_book3s_init(void)
+{
+	int r;
+
+	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	if (r)
+		return r;
+#ifdef CONFIG_KVM_BOOK3S_32
+	r = kvmppc_book3s_init_pr();
+#endif
+	return r;
+
+}
+
+static void kvmppc_book3s_exit(void)
+{
+#ifdef CONFIG_KVM_BOOK3S_32
+	kvmppc_book3s_exit_pr();
+#endif
+	kvm_exit();
+}
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);

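Two themes run through this file: every kvmppc_core_* entry point becomes a thin dispatch through kvm->arch.kvm_ops, and the translation path gains a writing/writable pair so a guest page can be mapped read-only when the host page is not writable. A sketch of the latter contract, with invented helpers; the real primitive is gfn_to_pfn_prot(), which the code above now calls:

    #include <stdbool.h>

    /* Invented stand-in for the host lookup; demo policy only. */
    static unsigned long lookup_pfn_sketch(unsigned long gfn, bool writing,
                                           bool *writable)
    {
            *writable = true; /* demo: pretend every host page is writable */
            return gfn;       /* identity mapping for the demo */
    }

    /* Map writable only when the access is a store AND the host agrees. */
    static int map_guest_page_sketch(unsigned long gfn, bool iswrite)
    {
            bool writable;
            unsigned long pfn = lookup_pfn_sketch(gfn, iswrite, &writable);

            (void)pfn;
            return (iswrite && writable) ? 2 /* rw */ : 1 /* ro */;
    }
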
--- /dev/null
+++ b/arch/powerpc/kvm/book3s.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your optional) any later version of the license.
+ *
+ */
+
+#ifndef __POWERPC_KVM_BOOK3S_H__
+#define __POWERPC_KVM_BOOK3S_H__
+
+extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
+					 struct kvm_memory_slot *memslot);
+extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
+				  unsigned long end);
+extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
+
+extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				     unsigned int inst, int *advance);
+extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
+					int sprn, ulong spr_val);
+extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
+					int sprn, ulong *spr_val);
+extern int kvmppc_book3s_init_pr(void);
+extern void kvmppc_book3s_exit_pr(void);
+
+#endif

--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -84,7 +84,8 @@ static inline bool sr_nx(u32 sr_raw)
 }

 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-					  struct kvmppc_pte *pte, bool data);
+					  struct kvmppc_pte *pte, bool data,
+					  bool iswrite);
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid);

@@ -99,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 	u64 vsid;
 	struct kvmppc_pte pte;

-	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
+	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
 		return pte.vpage;

 	kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -111,10 +112,11 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
 	kvmppc_set_msr(vcpu, 0);
 }

-static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
+static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
 				      u32 sre, gva_t eaddr,
 				      bool primary)
 {
+	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u32 page, hash, pteg, htabmask;
 	hva_t r;

@@ -132,7 +134,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3
 		kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
 		sr_vsid(sre));

-	r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+	r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
 	if (kvm_is_error_hva(r))
 		return r;
 	return r | (pteg & ~PAGE_MASK);
@@ -145,7 +147,8 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
 }

 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
-					  struct kvmppc_pte *pte, bool data)
+					  struct kvmppc_pte *pte, bool data,
+					  bool iswrite)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	struct kvmppc_bat *bat;
@@ -186,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 				printk(KERN_INFO "BAT is not readable!\n");
 				continue;
 			}
-			if (!pte->may_write) {
-				/* let's treat r/o BATs as not-readable for now */
+			if (iswrite && !pte->may_write) {
 				dprintk_pte("BAT is read-only!\n");
 				continue;
 			}
@@ -201,9 +203,8 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,

 static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 				     struct kvmppc_pte *pte, bool data,
-				     bool primary)
+				     bool iswrite, bool primary)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 	u32 sre;
 	hva_t ptegp;
 	u32 pteg[16];
@@ -218,7 +219,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,

 	pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);

-	ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary);
+	ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
 	if (kvm_is_error_hva(ptegp)) {
 		printk(KERN_INFO "KVM: Invalid PTEG!\n");
 		goto no_page_found;
@@ -258,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 				break;
 			}

-			if ( !pte->may_read )
-				continue;
-
 			dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
 				    pteg[i], pteg[i+1], pp);
 			found = 1;
@@ -271,19 +269,23 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Update PTE C and A bits, so the guest's swapper knows we used the
 	   page */
 	if (found) {
-		u32 oldpte = pteg[i+1];
-
-		if (pte->may_read)
-			pteg[i+1] |= PTEG_FLAG_ACCESSED;
-		if (pte->may_write)
-			pteg[i+1] |= PTEG_FLAG_DIRTY;
-		else
-			dprintk_pte("KVM: Mapping read-only page!\n");
-
-		/* Write back into the PTEG */
-		if (pteg[i+1] != oldpte)
-			copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
+		u32 pte_r = pteg[i+1];
+		char __user *addr = (char __user *) &pteg[i+1];
+
+		/*
+		 * Use single-byte writes to update the HPTE, to
+		 * conform to what real hardware does.
+		 */
+		if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
+			pte_r |= PTEG_FLAG_ACCESSED;
+			put_user(pte_r >> 8, addr + 2);
+		}
+		if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
+			pte_r |= PTEG_FLAG_DIRTY;
+			put_user(pte_r, addr + 3);
+		}
+		if (!pte->may_read || (iswrite && !pte->may_write))
+			return -EPERM;
+		return 0;
 	}

@@ -302,12 +304,14 @@ no_page_found:
 }

 static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
-				      struct kvmppc_pte *pte, bool data)
+				      struct kvmppc_pte *pte, bool data,
+				      bool iswrite)
 {
 	int r;
 	ulong mp_ea = vcpu->arch.magic_page_ea;

 	pte->eaddr = eaddr;
+	pte->page_size = MMU_PAGE_4K;

 	/* Magic page override */
 	if (unlikely(mp_ea) &&
@@ -323,11 +327,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		return 0;
 	}

-	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
+	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
 	if (r < 0)
-		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
+		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+						   data, iswrite, true);
 	if (r < 0)
-		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
+		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
+						   data, iswrite, false);

 	return r;
 }
@@ -347,7 +353,12 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,

 static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
 {
-	kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
+	int i;
+	struct kvm_vcpu *v;
+
+	/* flush this VA on all cpus */
+	kvm_for_each_vcpu(i, v, vcpu->kvm)
+		kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
 }

 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,

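The single-byte put_user() updates above depend on the big-endian layout of the 32-bit PTE word: a flag in bits 15:8 lives in byte 2, and one in bits 7:0 lives in byte 3, so only that byte needs rewriting. A quick check of the offsets, assuming ACCESSED is bit 8 and DIRTY is bit 7, which is what the shifts in the code imply (the real constants live in the kernel sources):

    #include <stdint.h>
    #include <stdio.h>

    #define SK_FLAG_ACCESSED 0x00000100u /* assumed value */
    #define SK_FLAG_DIRTY    0x00000080u /* assumed value */

    int main(void)
    {
            uint32_t pte = SK_FLAG_ACCESSED | SK_FLAG_DIRTY;
            unsigned char be[4];
            int i;

            for (i = 0; i < 4; i++) /* big-endian byte order */
                    be[i] = pte >> (24 - 8 * i);

            /* Prints byte2=01 byte3=80: accessed in byte 2, dirty in
             * byte 3, matching put_user(pte_r >> 8, addr + 2) and
             * put_user(pte_r, addr + 3) above. */
            printf("byte2=%02x byte3=%02x\n", be[2], be[3]);
            return 0;
    }
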
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -138,7 +138,8 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,

 extern char etext[];

-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+			bool iswrite)
 {
 	pfn_t hpaddr;
 	u64 vpn;
@@ -152,9 +153,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	bool evict = false;
 	struct hpte_cache *pte;
 	int r = 0;
+	bool writable;

 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
+				   iswrite, &writable);
 	if (is_error_noslot_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 				 orig_pte->eaddr);
@@ -204,7 +207,7 @@ next_pteg:
 		(primary ? 0 : PTE_SEC);
 	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

-	if (orig_pte->may_write) {
+	if (orig_pte->may_write && writable) {
 		pteg1 |= PP_RWRW;
 		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
 	} else {
@@ -259,6 +262,11 @@ out:
 	return r;
 }

+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
+}
+
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
@@ -341,7 +349,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 	svcpu_put(svcpu);
 }

-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
 {
 	int i;

@ -107,9 +107,20 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
|
|||
return kvmppc_slb_calc_vpn(slb, eaddr);
|
||||
}
|
||||
|
||||
static int mmu_pagesize(int mmu_pg)
|
||||
{
|
||||
switch (mmu_pg) {
|
||||
case MMU_PAGE_64K:
|
||||
return 16;
|
||||
case MMU_PAGE_16M:
|
||||
return 24;
|
||||
}
|
||||
return 12;
|
||||
}
|
||||
|
||||
static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
|
||||
{
|
||||
return slbe->large ? 24 : 12;
|
||||
return mmu_pagesize(slbe->base_page_size);
|
||||
}
|
||||
|
||||
static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
|
||||
|
@ -119,11 +130,11 @@ static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
|
|||
return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
|
||||
}
|
||||
|
||||
static hva_t kvmppc_mmu_book3s_64_get_pteg(
|
||||
struct kvmppc_vcpu_book3s *vcpu_book3s,
|
||||
static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
|
||||
struct kvmppc_slb *slbe, gva_t eaddr,
|
||||
bool second)
|
||||
{
|
||||
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
|
||||
u64 hash, pteg, htabsize;
|
||||
u32 ssize;
|
||||
hva_t r;
|
||||
|
@ -148,10 +159,10 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(
|
|||
|
||||
/* When running a PAPR guest, SDR1 contains a HVA address instead
|
||||
of a GPA */
|
||||
if (vcpu_book3s->vcpu.arch.papr_enabled)
|
||||
if (vcpu->arch.papr_enabled)
|
||||
r = pteg;
|
||||
else
|
||||
r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
|
||||
r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
|
||||
|
||||
if (kvm_is_error_hva(r))
|
||||
return r;
|
||||
|
@ -166,18 +177,38 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
|
|||
avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
|
||||
avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
|
||||
|
||||
if (p < 24)
|
||||
avpn >>= ((80 - p) - 56) - 8;
|
||||
if (p < 16)
|
||||
avpn >>= ((80 - p) - 56) - 8; /* 16 - p */
|
||||
else
|
||||
avpn <<= 8;
|
||||
avpn <<= p - 16;
|
||||
|
||||
return avpn;
|
||||
}
|
||||
|
||||
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
|
||||
struct kvmppc_pte *gpte, bool data)
|
||||
/*
|
||||
* Return page size encoded in the second word of a HPTE, or
|
||||
* -1 for an invalid encoding for the base page size indicated by
|
||||
* the SLB entry. This doesn't handle mixed pagesize segments yet.
|
||||
*/
|
||||
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
|
||||
{
|
||||
switch (slbe->base_page_size) {
|
||||
case MMU_PAGE_64K:
|
||||
if ((r & 0xf000) == 0x1000)
|
||||
return MMU_PAGE_64K;
|
||||
break;
|
||||
case MMU_PAGE_16M:
|
||||
if ((r & 0xff000) == 0)
|
||||
return MMU_PAGE_16M;
|
||||
break;
|
||||
}
|
||||
return -1;
|
||||
}

static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        struct kvmppc_pte *gpte, bool data,
        bool iswrite)
{
    struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
    struct kvmppc_slb *slbe;
    hva_t ptegp;
    u64 pteg[16];

@@ -189,6 +220,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
    u8 pp, key = 0;
    bool found = false;
    bool second = false;
    int pgsize;
    ulong mp_ea = vcpu->arch.magic_page_ea;

    /* Magic page override */

@@ -202,6 +234,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        gpte->may_execute = true;
        gpte->may_read = true;
        gpte->may_write = true;
        gpte->page_size = MMU_PAGE_4K;

        return 0;
    }

@@ -222,8 +255,12 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
    v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
        HPTE_V_SECONDARY;

    pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

    mutex_lock(&vcpu->kvm->arch.hpt_mutex);

do_second:
    ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
    ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
    if (kvm_is_error_hva(ptegp))
        goto no_page_found;

@@ -240,6 +277,13 @@ do_second:
    for (i=0; i<16; i+=2) {
        /* Check all relevant fields of 1st dword */
        if ((pteg[i] & v_mask) == v_val) {
            /* If large page bit is set, check pgsize encoding */
            if (slbe->large &&
                (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                pgsize = decode_pagesize(slbe, pteg[i+1]);
                if (pgsize < 0)
                    continue;
            }
            found = true;
            break;
        }

@@ -256,13 +300,15 @@ do_second:
    v = pteg[i];
    r = pteg[i+1];
    pp = (r & HPTE_R_PP) | key;
    eaddr_mask = 0xFFF;
    if (r & HPTE_R_PP0)
        pp |= 8;

    gpte->eaddr = eaddr;
    gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
    if (slbe->large)
        eaddr_mask = 0xFFFFFF;

    eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
    gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
    gpte->page_size = pgsize;
    gpte->may_execute = ((r & HPTE_R_N) ? false : true);
    gpte->may_read = false;
    gpte->may_write = false;

@@ -277,6 +323,7 @@ do_second:
    case 3:
    case 5:
    case 7:
    case 10:
        gpte->may_read = true;
        break;
    }

@@ -287,30 +334,37 @@ do_second:

    /* Update PTE R and C bits, so the guest's swapper knows we used the
     * page */
    if (gpte->may_read) {
        /* Set the accessed flag */
    if (gpte->may_read && !(r & HPTE_R_R)) {
        /*
         * Set the accessed flag.
         * We have to write this back with a single byte write
         * because another vcpu may be accessing this on
         * non-PAPR platforms such as mac99, and this is
         * what real hardware does.
         */
        char __user *addr = (char __user *) &pteg[i+1];
        r |= HPTE_R_R;
        put_user(r >> 8, addr + 6);
    }
    if (data && gpte->may_write) {
        /* Set the dirty flag -- XXX even if not writing */
    if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
        /* Set the dirty flag */
        /* Use a single byte write */
        char __user *addr = (char __user *) &pteg[i+1];
        r |= HPTE_R_C;
        put_user(r, addr + 7);
    }

    /* Write back into the PTEG */
    if (pteg[i+1] != r) {
        pteg[i+1] = r;
        copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
    }
    mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

    if (!gpte->may_read)
    if (!gpte->may_read || (iswrite && !gpte->may_write))
        return -EPERM;
    return 0;

no_page_found:
    mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
    return -ENOENT;

no_seg_found:

    dprintk("KVM MMU: Trigger segment fault\n");
    return -EINVAL;
}
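Why addr + 6 and r >> 8 for the reference bit: the HPTE doubleword sits big-endian in the guest hash table, so byte 6 holds bits 8-15, which is where HPTE_R_R (0x100) lives, and the change bit HPTE_R_C (0x80) sits in byte 7. A user-space sketch of the same single-byte read-modify-write on a big-endian image of the word; the HPTE_R_* values match the patch, the rest is illustrative.

#include <stdint.h>
#include <stdio.h>

#define HPTE_R_R 0x100ULL   /* referenced */
#define HPTE_R_C 0x080ULL   /* changed */

int main(void)
{
    uint64_t r = 0x0000000000550070ULL; /* hypothetical second dword */
    unsigned char be[8];

    /* Build the big-endian byte image, as it sits in the guest HPT. */
    for (int i = 0; i < 8; i++)
        be[i] = (r >> (56 - 8 * i)) & 0xff;

    /* Set R via one write to byte 6 (bits 8..15): put_user(r >> 8, addr + 6). */
    r |= HPTE_R_R;
    be[6] = (r >> 8) & 0xff;

    /* Set C via one write to byte 7 (bits 0..7): put_user(r, addr + 7). */
    r |= HPTE_R_C;
    be[7] = r & 0xff;

    /* Reassemble and verify both flags landed. */
    uint64_t check = 0;
    for (int i = 0; i < 8; i++)
        check = (check << 8) | be[i];
    printf("R set: %d  C set: %d\n",
           !!(check & HPTE_R_R), !!(check & HPTE_R_C));
    return 0;
}
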

@@ -345,6 +399,21 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
    slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
    slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

    slbe->base_page_size = MMU_PAGE_4K;
    if (slbe->large) {
        if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
            switch (rs & SLB_VSID_LP) {
            case SLB_VSID_LP_00:
                slbe->base_page_size = MMU_PAGE_16M;
                break;
            case SLB_VSID_LP_01:
                slbe->base_page_size = MMU_PAGE_64K;
                break;
            }
        } else
            slbe->base_page_size = MMU_PAGE_16M;
    }

    slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
    slbe->origv = rs;

@@ -460,14 +529,45 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
                       bool large)
{
    u64 mask = 0xFFFFFFFFFULL;
    long i;
    struct kvm_vcpu *v;

    dprintk("KVM MMU: tlbie(0x%lx)\n", va);

    if (large)
        mask = 0xFFFFFF000ULL;
    kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
    /*
     * The tlbie instruction changed behaviour starting with
     * POWER6.  POWER6 and later don't have the large page flag
     * in the instruction but in the RB value, along with bits
     * indicating page and segment sizes.
     */
    if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
        /* POWER6 or later */
        if (va & 1) {       /* L bit */
            if ((va & 0xf000) == 0x1000)
                mask = 0xFFFFFFFF0ULL; /* 64k page */
            else
                mask = 0xFFFFFF000ULL; /* 16M page */
        }
    } else {
        /* older processors, e.g. PPC970 */
        if (large)
            mask = 0xFFFFFF000ULL;
    }
    /* flush this VA on all vcpus */
    kvm_for_each_vcpu(i, v, vcpu->kvm)
        kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}
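The flush masks above are applied against vpage (va >> 12), so a 64K page clears the low four bits of the 4K-page index (0xFFFFFFFF0) and a 16M page clears the low twelve (0xFFFFFF000). A compact sketch of the mask selection using the same constants; the hflags flag value is a hypothetical stand-in.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for BOOK3S_HFLAG_NEW_TLBIE in vcpu->arch.hflags. */
#define HFLAG_NEW_TLBIE 0x1

/* Returns the vpage mask to flush for a tlbie with RB value 'va'. */
static uint64_t tlbie_flush_mask(uint64_t va, int large, unsigned hflags)
{
    uint64_t mask = 0xFFFFFFFFFULL;     /* default: single 4k page */

    if (hflags & HFLAG_NEW_TLBIE) {
        /* POWER6+: page size is encoded in RB, L bit in bit 0 */
        if (va & 1) {
            if ((va & 0xf000) == 0x1000)
                mask = 0xFFFFFFFF0ULL;  /* 64k page */
            else
                mask = 0xFFFFFF000ULL;  /* 16M page */
        }
    } else if (large) {
        mask = 0xFFFFFF000ULL;          /* pre-POWER6 large page */
    }
    return mask;
}

int main(void)
{
    printf("%llx\n", (unsigned long long)tlbie_flush_mask(0x1001, 1, HFLAG_NEW_TLBIE));
    printf("%llx\n", (unsigned long long)tlbie_flush_mask(0x0001, 1, HFLAG_NEW_TLBIE));
    printf("%llx\n", (unsigned long long)tlbie_flush_mask(0x0000, 1, 0));
    return 0;
}
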

#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
    ulong mp_ea = vcpu->arch.magic_page_ea;

    return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
        (mp_ea >> SID_SHIFT) == esid;
}
#endif

static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                         u64 *vsid)
{

@@ -475,11 +575,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
    struct kvmppc_slb *slb;
    u64 gvsid = esid;
    ulong mp_ea = vcpu->arch.magic_page_ea;
    int pagesize = MMU_PAGE_64K;

    if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
        if (slb) {
            gvsid = slb->vsid;
            pagesize = slb->base_page_size;
            if (slb->tb) {
                gvsid <<= SID_SHIFT_1T - SID_SHIFT;
                gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);

@@ -490,28 +592,41 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,

    switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
    case 0:
        *vsid = VSID_REAL | esid;
        gvsid = VSID_REAL | esid;
        break;
    case MSR_IR:
        *vsid = VSID_REAL_IR | gvsid;
        gvsid |= VSID_REAL_IR;
        break;
    case MSR_DR:
        *vsid = VSID_REAL_DR | gvsid;
        gvsid |= VSID_REAL_DR;
        break;
    case MSR_DR|MSR_IR:
        if (!slb)
            goto no_slb;

        *vsid = gvsid;
        break;
    default:
        BUG();
        break;
    }

    if (vcpu->arch.shared->msr & MSR_PR)
        *vsid |= VSID_PR;
#ifdef CONFIG_PPC_64K_PAGES
    /*
     * Mark this as a 64k segment if the host is using
     * 64k pages, the host MMU supports 64k pages and
     * the guest segment page size is >= 64k,
     * but not if this segment contains the magic page.
     */
    if (pagesize >= MMU_PAGE_64K &&
        mmu_psize_defs[MMU_PAGE_64K].shift &&
        !segment_contains_magic_page(vcpu, esid))
        gvsid |= VSID_64K;
#endif

    if (vcpu->arch.shared->msr & MSR_PR)
        gvsid |= VSID_PR;

    *vsid = gvsid;
    return 0;

no_slb:
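The 64k-segment decision above hinges on three independent conditions: the host MMU supports 64k pages (mmu_psize_defs[MMU_PAGE_64K].shift non-zero), the guest segment page size is at least 64k, and the magic page does not live in the segment. A hedged sketch of that predicate in isolation; names are illustrative, and mmu_psize_defs here is a plain array, not the kernel's table.

#include <stdbool.h>
#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M };

/* Hypothetical stand-in for the host MMU's page size table. */
static const struct { int shift; } mmu_psize_defs[] = {
    [MMU_PAGE_4K]  = { 12 },
    [MMU_PAGE_64K] = { 16 },   /* zero here would mean "no host 64k" */
    [MMU_PAGE_16M] = { 24 },
};

/* Same three-way test as the VSID_64K marking in esid_to_vsid(). */
static bool use_64k_segment(int guest_pagesize, bool segment_has_magic_page)
{
    return guest_pagesize >= MMU_PAGE_64K &&
           mmu_psize_defs[MMU_PAGE_64K].shift &&
           !segment_has_magic_page;
}

int main(void)
{
    printf("%d\n", use_64k_segment(MMU_PAGE_64K, false)); /* 1 */
    printf("%d\n", use_64k_segment(MMU_PAGE_4K, false));  /* 0 */
    printf("%d\n", use_64k_segment(MMU_PAGE_64K, true));  /* 0 */
    return 0;
}
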
@@ -27,14 +27,14 @@
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace.h"
#include "trace_pr.h"

#define PTE_SIZE 12

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
    ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
                   MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M,
                   pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
                   false);
}

@@ -78,7 +78,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
    return NULL;
}

int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
            bool iswrite)
{
    unsigned long vpn;
    pfn_t hpaddr;

@@ -90,16 +91,26 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
    int attempt = 0;
    struct kvmppc_sid_map *map;
    int r = 0;
    int hpsize = MMU_PAGE_4K;
    bool writable;
    unsigned long mmu_seq;
    struct kvm *kvm = vcpu->kvm;
    struct hpte_cache *cpte;
    unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
    unsigned long pfn;

    /* used to check for invalidations in progress */
    mmu_seq = kvm->mmu_notifier_seq;
    smp_rmb();

    /* Get host physical address for gpa */
    hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
    if (is_error_noslot_pfn(hpaddr)) {
        printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
    pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
    if (is_error_noslot_pfn(pfn)) {
        printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
        r = -EINVAL;
        goto out;
    }
    hpaddr <<= PAGE_SHIFT;
    hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
    hpaddr = pfn << PAGE_SHIFT;

    /* and write the mapping ea -> hpa into the pt */
    vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);

@@ -117,20 +128,39 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
        goto out;
    }

    vsid = map->host_vsid;
    vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
    vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

    if (!orig_pte->may_write)
        rflags |= HPTE_R_PP;
    else
        mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
    kvm_set_pfn_accessed(pfn);
    if (!orig_pte->may_write || !writable)
        rflags |= PP_RXRX;
    else {
        mark_page_dirty(vcpu->kvm, gfn);
        kvm_set_pfn_dirty(pfn);
    }

    if (!orig_pte->may_execute)
        rflags |= HPTE_R_N;
    else
        kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
        kvmppc_mmu_flush_icache(pfn);

    hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
    /*
     * Use 64K pages if possible; otherwise, on 64K page kernels,
     * we need to transfer 4 more bits from guest real to host real addr.
     */
    if (vsid & VSID_64K)
        hpsize = MMU_PAGE_64K;
    else
        hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

    hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

    cpte = kvmppc_mmu_hpte_cache_next(vcpu);

    spin_lock(&kvm->mmu_lock);
    if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
        r = -EAGAIN;
        goto out_unlock;
    }

map_again:
    hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

@@ -139,11 +169,11 @@ map_again:
    if (attempt > 1)
        if (ppc_md.hpte_remove(hpteg) < 0) {
            r = -1;
            goto out;
            goto out_unlock;
        }

    ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
                 MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M);
                 hpsize, hpsize, MMU_SEGSIZE_256M);

    if (ret < 0) {
        /* If we couldn't map a primary PTE, try a secondary */

@@ -152,8 +182,6 @@ map_again:
        attempt++;
        goto map_again;
    } else {
        struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);

        trace_kvm_book3s_64_mmu_map(rflags, hpteg,
                        vpn, hpaddr, orig_pte);

@@ -164,19 +192,37 @@ map_again:
            hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
        }

        pte->slot = hpteg + (ret & 7);
        pte->host_vpn = vpn;
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;
        cpte->slot = hpteg + (ret & 7);
        cpte->host_vpn = vpn;
        cpte->pte = *orig_pte;
        cpte->pfn = pfn;
        cpte->pagesize = hpsize;

        kvmppc_mmu_hpte_cache_map(vcpu, pte);
        kvmppc_mmu_hpte_cache_map(vcpu, cpte);
        cpte = NULL;
    }
    kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);

out_unlock:
    spin_unlock(&kvm->mmu_lock);
    kvm_release_pfn_clean(pfn);
    if (cpte)
        kvmppc_mmu_hpte_cache_free(cpte);

out:
    return r;
}
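The mmu_seq and mmu_notifier_retry() dance added here is the standard KVM pattern: sample the notifier sequence count before the (possibly sleeping) gfn-to-pfn lookup, then recheck it under the lock before committing the mapping, and bail out with -EAGAIN if an invalidation ran in between. A schematic sketch of the idea; this is not the kernel API, a toy atomic counter stands in for kvm->mmu_notifier_seq.

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for kvm->mmu_notifier_seq. */
static atomic_ulong mmu_notifier_seq;

/* Invalidation path: bump the sequence so in-flight mappers retry. */
static void invalidate_range(void)
{
    atomic_fetch_add(&mmu_notifier_seq, 1);
}

/*
 * Mapping path, split so the race is visible: sample the sequence,
 * do the gfn->pfn lookup, then recheck before committing.
 * Returns 0 on success, -1 when a retry is needed.
 */
static int commit_mapping(unsigned long mmu_seq)
{
    /* In the real code this comparison happens under kvm->mmu_lock. */
    if (atomic_load(&mmu_notifier_seq) != mmu_seq)
        return -1;  /* like -EAGAIN: caller refaults and retries */
    /* ... insert the HPTE and cache it ... */
    return 0;
}

int main(void)
{
    unsigned long seq = atomic_load(&mmu_notifier_seq);
    printf("clean: %d\n", commit_mapping(seq));   /* 0 */

    seq = atomic_load(&mmu_notifier_seq);
    invalidate_range();             /* notifier fires mid-fault */
    printf("raced: %d\n", commit_mapping(seq));   /* -1 */
    return 0;
}
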

void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
    u64 mask = 0xfffffffffULL;
    u64 vsid;

    vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
    if (vsid & VSID_64K)
        mask = 0xffffffff0ULL;
    kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}

static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
    struct kvmppc_sid_map *map;

@@ -291,6 +337,12 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
    slb_vsid &= ~SLB_VSID_KP;
    slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
    /* Set host segment base page size to 64K if possible */
    if (gvsid & VSID_64K)
        slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

    svcpu->slb[slb_index].esid = slb_esid;
    svcpu->slb[slb_index].vsid = slb_vsid;

@@ -326,7 +378,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
    svcpu_put(svcpu);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
    kvmppc_mmu_hpte_destroy(vcpu);
    __destroy_context(to_book3s(vcpu)->context_id[0]);

@@ -260,10 +260,6 @@ int kvmppc_mmu_hv_init(void)
    return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
    kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);

@@ -451,7 +447,7 @@ static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
            struct kvmppc_pte *gpte, bool data)
            struct kvmppc_pte *gpte, bool data, bool iswrite)
{
    struct kvm *kvm = vcpu->kvm;
    struct kvmppc_slb *slbe;

@@ -906,21 +902,22 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
    return 0;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
{
    if (kvm->arch.using_mmu_notifiers)
        kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
    return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
    if (kvm->arch.using_mmu_notifiers)
        kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
    return 0;
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
                  struct kvm_memory_slot *memslot)
{
    unsigned long *rmapp;
    unsigned long gfn;

@@ -994,7 +991,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
    return ret;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
    if (!kvm->arch.using_mmu_notifiers)
        return 0;

@@ -1032,14 +1029,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
    return ret;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
    if (!kvm->arch.using_mmu_notifiers)
        return 0;
    return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
    if (!kvm->arch.using_mmu_notifiers)
        return;

@@ -1512,9 +1509,8 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,

                kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
                    (VRMA_VSID << SLB_VSID_SHIFT_1T);
                lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
                lpcr |= senc << (LPCR_VRMASD_SH - 4);
                kvm->arch.lpcr = lpcr;
                lpcr = senc << (LPCR_VRMASD_SH - 4);
                kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
                rma_setup = 1;
            }
            ++i;

@@ -74,3 +74,4 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
    /* Didn't find the liobn, punt it to userspace */
    return H_TOO_HARD;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

@@ -86,8 +86,8 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
    return true;
}

int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
               unsigned int inst, int *advance)
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                  unsigned int inst, int *advance)
{
    int emulated = EMULATE_DONE;
    int rt = get_rt(inst);

@@ -172,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
            vcpu->arch.mmu.tlbie(vcpu, addr, large);
            break;
        }
#ifdef CONFIG_KVM_BOOK3S_64_PR
#ifdef CONFIG_PPC_BOOK3S_64
        case OP_31_XOP_FAKE_SC1:
        {
            /* SC 1 papr hypercalls */

@@ -267,12 +267,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,

            r = kvmppc_st(vcpu, &addr, 32, zeros, true);
            if ((r == -ENOENT) || (r == -EPERM)) {
                struct kvmppc_book3s_shadow_vcpu *svcpu;

                svcpu = svcpu_get(vcpu);
                *advance = 0;
                vcpu->arch.shared->dar = vaddr;
                svcpu->fault_dar = vaddr;
                vcpu->arch.fault_dar = vaddr;

                dsisr = DSISR_ISSTORE;
                if (r == -ENOENT)

@@ -281,8 +278,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                    dsisr |= DSISR_PROTFAULT;

                vcpu->arch.shared->dsisr = dsisr;
                svcpu->fault_dsisr = dsisr;
                svcpu_put(svcpu);
                vcpu->arch.fault_dsisr = dsisr;

                kvmppc_book3s_queue_irqprio(vcpu,
                    BOOK3S_INTERRUPT_DATA_STORAGE);

@@ -349,7 +345,7 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
    return bat;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
    int emulated = EMULATE_DONE;

@@ -472,7 +468,7 @@ unprivileged:
    return emulated;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
    int emulated = EMULATE_DONE;

@@ -20,9 +20,10 @@
#include <linux/export.h>
#include <asm/kvm_book3s.h>

#ifdef CONFIG_KVM_BOOK3S_64_HV
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
#else
#endif
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC

@@ -52,6 +52,9 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include "book3s.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */

@@ -66,7 +69,7 @@
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
    int me;
    int cpu = vcpu->cpu;

@@ -125,7 +128,7 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
 * purely defensive; they should never fail.)
 */

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
    struct kvmppc_vcore *vc = vcpu->arch.vcore;

@@ -143,7 +146,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
    spin_unlock(&vcpu->arch.tbacct_lock);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
    struct kvmppc_vcore *vc = vcpu->arch.vcore;

@@ -155,17 +158,46 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
    spin_unlock(&vcpu->arch.tbacct_lock);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
    vcpu->arch.shregs.msr = msr;
    kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
    vcpu->arch.pvr = pvr;
}

int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
    unsigned long pcr = 0;
    struct kvmppc_vcore *vc = vcpu->arch.vcore;

    if (arch_compat) {
        if (!cpu_has_feature(CPU_FTR_ARCH_206))
            return -EINVAL; /* 970 has no compat mode support */

        switch (arch_compat) {
        case PVR_ARCH_205:
            pcr = PCR_ARCH_205;
            break;
        case PVR_ARCH_206:
        case PVR_ARCH_206p:
            break;
        default:
            return -EINVAL;
        }
    }

    spin_lock(&vc->lock);
    vc->arch_compat = arch_compat;
    vc->pcr = pcr;
    spin_unlock(&vc->lock);

    return 0;
}
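kvmppc_set_arch_compat() reduces to a small PVR-to-PCR mapping: requesting 2.05 compatibility sets PCR_ARCH_205, 2.06 and 2.06+ need no PCR bits, anything else is rejected, and a pre-2.06 host cannot do compat at all. A table-style sketch of that mapping; the PVR_ARCH_* and PCR_* values here are placeholders, not the architected numbers.

#include <stdio.h>

/* Placeholder encodings; the kernel takes these from its reg headers. */
#define PVR_ARCH_205   0x0f000001u
#define PVR_ARCH_206   0x0f000002u
#define PVR_ARCH_206p  0x0f100003u
#define PCR_ARCH_205   0x2ul

/* Mirrors the switch in kvmppc_set_arch_compat(); -1 stands for -EINVAL. */
static long arch_compat_to_pcr(unsigned int arch_compat, int host_has_arch_206)
{
    if (!arch_compat)
        return 0;          /* native mode: no PCR bits */
    if (!host_has_arch_206)
        return -1;         /* 970 has no compat mode support */

    switch (arch_compat) {
    case PVR_ARCH_205:
        return PCR_ARCH_205;
    case PVR_ARCH_206:
    case PVR_ARCH_206p:
        return 0;
    default:
        return -1;
    }
}

int main(void)
{
    printf("%ld\n", arch_compat_to_pcr(PVR_ARCH_205, 1)); /* PCR_ARCH_205 */
    printf("%ld\n", arch_compat_to_pcr(PVR_ARCH_206, 1)); /* 0 */
    printf("%ld\n", arch_compat_to_pcr(PVR_ARCH_205, 0)); /* -1 */
    return 0;
}
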

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
    int r;

@@ -195,7 +227,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
        pr_err("  ESID = %.16llx VSID = %.16llx\n",
               vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
    pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
           vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
           vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
           vcpu->arch.last_inst);
}

@@ -489,7 +521,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
    memset(dt, 0, sizeof(struct dtl_entry));
    dt->dispatch_reason = 7;
    dt->processor_id = vc->pcpu + vcpu->arch.ptid;
    dt->timebase = now;
    dt->timebase = now + vc->tb_offset;
    dt->enqueue_to_dispatch_time = stolen;
    dt->srr0 = kvmppc_get_pc(vcpu);
    dt->srr1 = vcpu->arch.shregs.msr;

@@ -538,6 +570,15 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        }
        break;
    case H_CONFER:
        target = kvmppc_get_gpr(vcpu, 4);
        if (target == -1)
            break;
        tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
        if (!tvcpu) {
            ret = H_PARAMETER;
            break;
        }
        kvm_vcpu_yield_to(tvcpu);
        break;
    case H_REGISTER_VPA:
        ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),

@@ -576,8 +617,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
    return RESUME_GUEST;
}

static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                  struct task_struct *tsk)
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 struct task_struct *tsk)
{
    int r = RESUME_HOST;

@@ -671,16 +712,16 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
            vcpu->arch.trap, kvmppc_get_pc(vcpu),
            vcpu->arch.shregs.msr);
        run->hw.hardware_exit_reason = vcpu->arch.trap;
        r = RESUME_HOST;
        BUG();
        break;
    }

    return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                  struct kvm_sregs *sregs)
static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
                        struct kvm_sregs *sregs)
{
    int i;

@@ -694,12 +735,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
    return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                  struct kvm_sregs *sregs)
static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
                        struct kvm_sregs *sregs)
{
    int i, j;

    kvmppc_set_pvr(vcpu, sregs->pvr);
    kvmppc_set_pvr_hv(vcpu, sregs->pvr);

    j = 0;
    for (i = 0; i < vcpu->arch.slb_nr; i++) {

@@ -714,7 +755,23 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
    return 0;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
    struct kvmppc_vcore *vc = vcpu->arch.vcore;
    u64 mask;

    spin_lock(&vc->lock);
    /*
     * Userspace can only modify DPFD (default prefetch depth),
     * ILE (interrupt little-endian) and TC (translation control).
     */
    mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
    vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
    spin_unlock(&vc->lock);
}
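The effect of kvmppc_set_lpcr() is a whitelisted read-modify-write: only the DPFD, ILE and TC fields can be changed from userspace, and everything else in the vcore's LPCR is preserved. A sketch of the masking; the LPCR_* bit positions below are invented for the demo, the kernel's come from its reg headers.

#include <stdint.h>
#include <stdio.h>

/* Demo-only bit assignments, not the architected LPCR layout. */
#define LPCR_DPFD 0x0000000000001c00ULL
#define LPCR_ILE  0x0000000002000000ULL
#define LPCR_TC   0x0000000000000200ULL

/* Same masking as kvmppc_set_lpcr(): user bits in, everything else kept. */
static uint64_t apply_user_lpcr(uint64_t cur, uint64_t new_lpcr)
{
    uint64_t mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
    return (cur & ~mask) | (new_lpcr & mask);
}

int main(void)
{
    uint64_t cur = 0xdeadbeef00000000ULL | LPCR_TC;
    uint64_t req = ~0ULL;   /* userspace asks for everything */
    uint64_t got = apply_user_lpcr(cur, req);

    /* Only the whitelisted fields may differ from 'cur'. */
    printf("changed bits: 0x%llx\n", (unsigned long long)(cur ^ got));
    return 0;
}
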

static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                 union kvmppc_one_reg *val)
{
    int r = 0;
    long int i;

@@ -749,6 +806,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
        i = id - KVM_REG_PPC_PMC1;
        *val = get_reg_val(id, vcpu->arch.pmc[i]);
        break;
    case KVM_REG_PPC_SIAR:
        *val = get_reg_val(id, vcpu->arch.siar);
        break;
    case KVM_REG_PPC_SDAR:
        *val = get_reg_val(id, vcpu->arch.sdar);
        break;
#ifdef CONFIG_VSX
    case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
        if (cpu_has_feature(CPU_FTR_VSX)) {

@@ -787,6 +850,18 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
        val->vpaval.length = vcpu->arch.dtl.len;
        spin_unlock(&vcpu->arch.vpa_update_lock);
        break;
    case KVM_REG_PPC_TB_OFFSET:
        *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
        break;
    case KVM_REG_PPC_LPCR:
        *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
        break;
    case KVM_REG_PPC_PPR:
        *val = get_reg_val(id, vcpu->arch.ppr);
        break;
    case KVM_REG_PPC_ARCH_COMPAT:
        *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
        break;
    default:
        r = -EINVAL;
        break;

@@ -795,7 +870,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
    return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                 union kvmppc_one_reg *val)
{
    int r = 0;
    long int i;

@@ -833,6 +909,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
        i = id - KVM_REG_PPC_PMC1;
        vcpu->arch.pmc[i] = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_SIAR:
        vcpu->arch.siar = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_SDAR:
        vcpu->arch.sdar = set_reg_val(id, *val);
        break;
#ifdef CONFIG_VSX
    case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
        if (cpu_has_feature(CPU_FTR_VSX)) {

@@ -880,6 +962,20 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
        len -= len % sizeof(struct dtl_entry);
        r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
        break;
    case KVM_REG_PPC_TB_OFFSET:
        /* round up to multiple of 2^24 */
        vcpu->arch.vcore->tb_offset =
            ALIGN(set_reg_val(id, *val), 1UL << 24);
        break;
    case KVM_REG_PPC_LPCR:
        kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
        break;
    case KVM_REG_PPC_PPR:
        vcpu->arch.ppr = set_reg_val(id, *val);
        break;
    case KVM_REG_PPC_ARCH_COMPAT:
        r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
        break;
    default:
        r = -EINVAL;
        break;

@@ -888,14 +984,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
    return r;
}
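Rounding the timebase offset to a multiple of 2^24 matters because the entry code can only rewrite the upper 40 bits of the timebase (TBU40); the low 24 bits keep free-running. A short sketch of the rounding, using the usual align-up identity that ALIGN() expands to for a power-of-two alignment.

#include <stdint.h>
#include <stdio.h>

/* Align up to a power of two, as the kernel's ALIGN() macro does. */
static uint64_t align_up(uint64_t x, uint64_t a)
{
    return (x + a - 1) & ~(a - 1);
}

int main(void)
{
    uint64_t raw = 0x123456789ULL;  /* offset requested by userspace */
    uint64_t off = align_up(raw, 1ULL << 24);

    /* The low 24 bits must be zero so only TBU40 needs updating. */
    printf("0x%llx -> 0x%llx (low 24 bits: 0x%llx)\n",
           (unsigned long long)raw, (unsigned long long)off,
           (unsigned long long)(off & 0xffffff));
    return 0;
}
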

int kvmppc_core_check_processor_compat(void)
{
    if (cpu_has_feature(CPU_FTR_HVMODE))
        return 0;
    return -EIO;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
                           unsigned int id)
{
    struct kvm_vcpu *vcpu;
    int err = -EINVAL;

@@ -919,8 +1009,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
    vcpu->arch.mmcr[0] = MMCR0_FC;
    vcpu->arch.ctrl = CTRL_RUNLATCH;
    /* default to host PVR, since we can't spoof it */
    vcpu->arch.pvr = mfspr(SPRN_PVR);
    kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
    kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
    spin_lock_init(&vcpu->arch.vpa_update_lock);
    spin_lock_init(&vcpu->arch.tbacct_lock);
    vcpu->arch.busy_preempt = TB_NIL;

@@ -940,6 +1029,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
        spin_lock_init(&vcore->lock);
        init_waitqueue_head(&vcore->wq);
        vcore->preempt_tb = TB_NIL;
        vcore->lpcr = kvm->arch.lpcr;
    }
    kvm->arch.vcores[core] = vcore;
    kvm->arch.online_vcores++;

@@ -972,7 +1062,7 @@ static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
            vpa->dirty);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
{
    spin_lock(&vcpu->arch.vpa_update_lock);
    unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);

@@ -983,6 +1073,12 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
    kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
{
    /* Indicate we want to get back into the guest */
    return 1;
}

static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
    unsigned long dec_nsec, now;

@@ -1264,8 +1360,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)

        ret = RESUME_GUEST;
        if (vcpu->arch.trap)
            ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
                         vcpu->arch.run_task);
            ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
                            vcpu->arch.run_task);

        vcpu->arch.ret = ret;
        vcpu->arch.trap = 0;

@@ -1424,7 +1520,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
    return vcpu->arch.ret;
}

int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    int r;
    int srcu_idx;

@@ -1546,7 +1642,8 @@ static const struct file_operations kvm_rma_fops = {
    .release    = kvm_rma_release,
};

long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
                      struct kvm_allocate_rma *ret)
{
    long fd;
    struct kvm_rma_info *ri;

@@ -1592,7 +1689,8 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
    (*sps)++;
}

int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
                     struct kvm_ppc_smmu_info *info)
{
    struct kvm_ppc_one_seg_page_size *sps;

@@ -1613,7 +1711,8 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
                     struct kvm_dirty_log *log)
{
    struct kvm_memory_slot *memslot;
    int r;

@@ -1667,8 +1766,8 @@ static void unpin_slot(struct kvm_memory_slot *memslot)
    }
}

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
                  struct kvm_memory_slot *dont)
static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
                    struct kvm_memory_slot *dont)
{
    if (!dont || free->arch.rmap != dont->arch.rmap) {
        vfree(free->arch.rmap);

@@ -1681,8 +1780,8 @@ void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
    }
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
                   unsigned long npages)
static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
                     unsigned long npages)
{
    slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
    if (!slot->arch.rmap)

@@ -1692,9 +1791,9 @@ int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
    return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                      struct kvm_memory_slot *memslot,
                      struct kvm_userspace_memory_region *mem)
static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
                    struct kvm_memory_slot *memslot,
                    struct kvm_userspace_memory_region *mem)
{
    unsigned long *phys;

@@ -1710,9 +1809,9 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
    return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                      struct kvm_userspace_memory_region *mem,
                      const struct kvm_memory_slot *old)
static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
                const struct kvm_memory_slot *old)
{
    unsigned long npages = mem->memory_size >> PAGE_SHIFT;
    struct kvm_memory_slot *memslot;

@@ -1729,6 +1828,37 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
    }
}

/*
 * Update LPCR values in kvm->arch and in vcores.
 * Caller must hold kvm->lock.
 */
void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
{
    long int i;
    u32 cores_done = 0;

    if ((kvm->arch.lpcr & mask) == lpcr)
        return;

    kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;

    for (i = 0; i < KVM_MAX_VCORES; ++i) {
        struct kvmppc_vcore *vc = kvm->arch.vcores[i];
        if (!vc)
            continue;
        spin_lock(&vc->lock);
        vc->lpcr = (vc->lpcr & ~mask) | lpcr;
        spin_unlock(&vc->lock);
        if (++cores_done >= kvm->arch.online_vcores)
            break;
    }
}
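kvmppc_update_lpcr() is the VM-wide counterpart of the per-vcore masking above: it applies one (value, mask) pair to kvm->arch.lpcr and then to every allocated vcore under that vcore's lock, stopping early once all online vcores are covered. A minimal sketch of the propagation loop over a plain array; locks are elided and the array bound is a made-up stand-in for KVM_MAX_VCORES.

#include <stdint.h>
#include <stdio.h>

#define MAX_VCORES 4    /* stand-in for KVM_MAX_VCORES */

struct vcore { uint64_t lpcr; };

static uint64_t vm_lpcr;
static struct vcore *vcores[MAX_VCORES];
static int online_vcores;

/* Masked update of the VM value plus every allocated vcore. */
static void update_lpcr(uint64_t lpcr, uint64_t mask)
{
    int done = 0;

    if ((vm_lpcr & mask) == lpcr)
        return;                 /* nothing to change */
    vm_lpcr = (vm_lpcr & ~mask) | lpcr;

    for (int i = 0; i < MAX_VCORES; i++) {
        if (!vcores[i])
            continue;
        vcores[i]->lpcr = (vcores[i]->lpcr & ~mask) | lpcr;
        if (++done >= online_vcores)
            break;              /* all online cores updated */
    }
}

int main(void)
{
    static struct vcore a = { 0xf0 }, b = { 0x0f };
    vcores[0] = &a;
    vcores[2] = &b;
    online_vcores = 2;

    update_lpcr(0x04, 0x0c);    /* set one field, clear another */
    printf("vm=0x%llx a=0x%llx b=0x%llx\n",
           (unsigned long long)vm_lpcr, (unsigned long long)a.lpcr,
           (unsigned long long)b.lpcr);
    return 0;
}
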

static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
{
    return;
}

static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
    int err = 0;

@@ -1737,7 +1867,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
    unsigned long hva;
    struct kvm_memory_slot *memslot;
    struct vm_area_struct *vma;
    unsigned long lpcr, senc;
    unsigned long lpcr = 0, senc;
    unsigned long lpcr_mask = 0;
    unsigned long psize, porder;
    unsigned long rma_size;
    unsigned long rmls;

@@ -1802,9 +1933,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
        senc = slb_pgsize_encoding(psize);
        kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
            (VRMA_VSID << SLB_VSID_SHIFT_1T);
        lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
        lpcr |= senc << (LPCR_VRMASD_SH - 4);
        kvm->arch.lpcr = lpcr;
        lpcr_mask = LPCR_VRMASD;
        /* the -4 is to account for senc values starting at 0x10 */
        lpcr = senc << (LPCR_VRMASD_SH - 4);

        /* Create HPTEs in the hash page table for the VRMA */
        kvmppc_map_vrma(vcpu, memslot, porder);

@@ -1825,23 +1956,21 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
        kvm->arch.rma = ri;

        /* Update LPCR and RMOR */
        lpcr = kvm->arch.lpcr;
        if (cpu_has_feature(CPU_FTR_ARCH_201)) {
            /* PPC970; insert RMLS value (split field) in HID4 */
            lpcr &= ~((1ul << HID4_RMLS0_SH) |
                  (3ul << HID4_RMLS2_SH));
            lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
            lpcr_mask = (1ul << HID4_RMLS0_SH) |
                (3ul << HID4_RMLS2_SH) | HID4_RMOR;
            lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
                ((rmls & 3) << HID4_RMLS2_SH);
            /* RMOR is also in HID4 */
            lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
                << HID4_RMOR_SH;
        } else {
            /* POWER7 */
            lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
            lpcr |= rmls << LPCR_RMLS_SH;
            lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
            lpcr = rmls << LPCR_RMLS_SH;
            kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
        }
        kvm->arch.lpcr = lpcr;
        pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
            ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

@@ -1860,6 +1989,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
        }
    }

    kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);

    /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
    smp_wmb();
    kvm->arch.rma_setup_done = 1;

@@ -1875,7 +2006,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
    goto out_srcu;
}

int kvmppc_core_init_vm(struct kvm *kvm)
static int kvmppc_core_init_vm_hv(struct kvm *kvm)
{
    unsigned long lpcr, lpid;

@@ -1893,9 +2024,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
     */
    cpumask_setall(&kvm->arch.need_tlb_flush);

    INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
    INIT_LIST_HEAD(&kvm->arch.rtas_tokens);

    kvm->arch.rma = NULL;

    kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

@@ -1931,61 +2059,162 @@ int kvmppc_core_init_vm(struct kvm *kvm)
    return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
static void kvmppc_free_vcores(struct kvm *kvm)
{
    long int i;

    for (i = 0; i < KVM_MAX_VCORES; ++i)
        kfree(kvm->arch.vcores[i]);
    kvm->arch.online_vcores = 0;
}

static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
    uninhibit_secondary_onlining();

    kvmppc_free_vcores(kvm);
    if (kvm->arch.rma) {
        kvm_release_rma(kvm->arch.rma);
        kvm->arch.rma = NULL;
    }

    kvmppc_rtas_tokens_free(kvm);

    kvmppc_free_hpt(kvm);
    WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}

/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
               unsigned int inst, int *advance)
static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                     unsigned int inst, int *advance)
{
    return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
                    ulong spr_val)
{
    return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
                    ulong *spr_val)
{
    return EMULATE_FAIL;
}

static int kvmppc_book3s_hv_init(void)
static int kvmppc_core_check_processor_compat_hv(void)
{
    int r;
    if (!cpu_has_feature(CPU_FTR_HVMODE))
        return -EIO;
    return 0;
}

    r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
static long kvm_arch_vm_ioctl_hv(struct file *filp,
                 unsigned int ioctl, unsigned long arg)
{
    struct kvm *kvm __maybe_unused = filp->private_data;
    void __user *argp = (void __user *)arg;
    long r;

    if (r)
        return r;
    switch (ioctl) {

    r = kvmppc_mmu_hv_init();
    case KVM_ALLOCATE_RMA: {
        struct kvm_allocate_rma rma;
        struct kvm *kvm = filp->private_data;

        r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
        if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
            r = -EFAULT;
        break;
    }

    case KVM_PPC_ALLOCATE_HTAB: {
        u32 htab_order;

        r = -EFAULT;
        if (get_user(htab_order, (u32 __user *)argp))
            break;
        r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
        if (r)
            break;
        r = -EFAULT;
        if (put_user(htab_order, (u32 __user *)argp))
            break;
        r = 0;
        break;
    }

    case KVM_PPC_GET_HTAB_FD: {
        struct kvm_get_htab_fd ghf;

        r = -EFAULT;
        if (copy_from_user(&ghf, argp, sizeof(ghf)))
            break;
        r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
        break;
    }

    default:
        r = -ENOTTY;
    }

    return r;
}

static void kvmppc_book3s_hv_exit(void)
static struct kvmppc_ops kvm_ops_hv = {
    .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
    .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
    .get_one_reg = kvmppc_get_one_reg_hv,
    .set_one_reg = kvmppc_set_one_reg_hv,
    .vcpu_load   = kvmppc_core_vcpu_load_hv,
    .vcpu_put    = kvmppc_core_vcpu_put_hv,
    .set_msr     = kvmppc_set_msr_hv,
    .vcpu_run    = kvmppc_vcpu_run_hv,
    .vcpu_create = kvmppc_core_vcpu_create_hv,
    .vcpu_free   = kvmppc_core_vcpu_free_hv,
    .check_requests = kvmppc_core_check_requests_hv,
    .get_dirty_log  = kvm_vm_ioctl_get_dirty_log_hv,
    .flush_memslot  = kvmppc_core_flush_memslot_hv,
    .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
    .commit_memory_region  = kvmppc_core_commit_memory_region_hv,
    .unmap_hva = kvm_unmap_hva_hv,
    .unmap_hva_range = kvm_unmap_hva_range_hv,
    .age_hva  = kvm_age_hva_hv,
    .test_age_hva = kvm_test_age_hva_hv,
    .set_spte_hva = kvm_set_spte_hva_hv,
    .mmu_destroy  = kvmppc_mmu_destroy_hv,
    .free_memslot = kvmppc_core_free_memslot_hv,
    .create_memslot = kvmppc_core_create_memslot_hv,
    .init_vm  = kvmppc_core_init_vm_hv,
    .destroy_vm = kvmppc_core_destroy_vm_hv,
    .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
    .emulate_op = kvmppc_core_emulate_op_hv,
    .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
    .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
    .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
    .arch_vm_ioctl  = kvm_arch_vm_ioctl_hv,
};
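This ops table is the heart of the HV/PR split in the merged series: instead of each backend exporting identically named globals, the common book3s code calls through a struct of function pointers that the chosen module installs at init time. A skeletal sketch of the pattern follows; the names are shortened, and the init flow only mimics the structure, it is not the kernel's types.

#include <stdio.h>

/* A cut-down analogue of struct kvmppc_ops. */
struct vm_ops {
    int  (*vcpu_run)(void);
    void (*destroy_vm)(void);
};

static int  hv_vcpu_run(void)   { puts("run under HV backend"); return 0; }
static void hv_destroy_vm(void) { puts("destroy under HV backend"); }

static struct vm_ops hv_ops = {
    .vcpu_run   = hv_vcpu_run,
    .destroy_vm = hv_destroy_vm,
};

/* Common code holds only a pointer; the backend module fills it in. */
static struct vm_ops *active_ops;

static int  backend_init(void) { active_ops = &hv_ops; return 0; }
static void backend_exit(void) { active_ops = NULL; }

int main(void)
{
    backend_init();           /* what module_init() does here */
    active_ops->vcpu_run();   /* common code dispatches blindly */
    active_ops->destroy_vm();
    backend_exit();
    return 0;
}
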

static int kvmppc_book3s_init_hv(void)
{
    kvm_exit();
    int r;
    /*
     * FIXME!! Do we need to check on all cpus ?
     */
    r = kvmppc_core_check_processor_compat_hv();
    if (r < 0)
        return r;

    kvm_ops_hv.owner = THIS_MODULE;
    kvmppc_hv_ops = &kvm_ops_hv;

    r = kvmppc_mmu_hv_init();
    return r;
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);
static void kvmppc_book3s_exit_hv(void)
{
    kvmppc_hv_ops = NULL;
}

module_init(kvmppc_book3s_init_hv);
module_exit(kvmppc_book3s_exit_hv);
MODULE_LICENSE("GPL");

@@ -158,9 +158,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 *	Interrupts are enabled again at this point.
 */

	.global kvmppc_handler_highmem
kvmppc_handler_highmem:

	/*
	 * Register usage at this point:
	 *

@@ -33,30 +33,6 @@
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.

@@ -66,8 +42,11 @@ kvmppc_skip_Hinterrupt:
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR

@@ -77,11 +56,103 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
	mtsrr1	r6
	RFI

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
kvmppc_call_hv_entry:
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi


/*
 * We come in here when wakened from nap mode on a secondary hw thread.

@@ -137,7 +208,7 @@ kvm_start_guest:
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest
	b	kvmppc_hv_entry
	b	30f

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
	b	kvm_no_guest

@@ -147,6 +218,57 @@ kvm_start_guest:
	stw	r8,HSTATE_SAVED_XIRR(r13)
	b	kvm_no_guest

30:	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	lwsync
	/* Clear any pending IPI - we're an offline thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_MFRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

@@ -159,7 +281,8 @@ kvmppc_hv_entry:
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */

@@ -200,8 +323,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
	mtspr	SPRN_MMCR0, r3
	isync

@@ -254,22 +381,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)

@@ -343,7 +463,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	bdnz	28b
	ptesync

22:	li	r0,1
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f
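The overflow check in the timebase-offset sequence above reads more easily in C: writing TBU40 replaces the upper 40 bits while the low 24 keep ticking, so if the low 24 bits of the timebase wrapped between the mftb and the mtspr, the upper 40 bits must be bumped by one (addis r8,r8,0x100 adds 1 at bit 24, the bottom of the TBU40 field). A sketch of that carry test; pure arithmetic, no SPR access, and the timebase values are made up.

#include <stdint.h>
#include <stdio.h>

#define LOW24(x) ((x) & 0xffffffULL)

/* Returns the value to write to TBU40, fixing up a low-24-bit wrap. */
static uint64_t tb_with_offset(uint64_t tb_before, uint64_t tb_after,
                               uint64_t offset)
{
    uint64_t target = tb_before + offset;

    /* If the low 24 bits wrapped during the update, carry into bit 24. */
    if (LOW24(tb_after) < LOW24(tb_before))
        target += 1ULL << 24;
    return target;
}

int main(void)
{
    /* No wrap: low bits grew between the two reads. */
    printf("0x%llx\n", (unsigned long long)
           tb_with_offset(0x10ffff00ULL, 0x10ffff80ULL, 0x1000000ULL));
    /* Wrap: low bits went 0xffff80 -> 0x000010, so add 1 << 24. */
    printf("0x%llx\n", (unsigned long long)
           tb_with_offset(0x10ffff80ULL, 0x11000010ULL, 0x1000000ULL));
    return 0;
}
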

@@ -353,12 +494,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,KVM_LPCR(r9)
10:	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:
	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10

@@ -405,7 +556,8 @@ toc_tlbie_lock:
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)	/* use kvm->arch.lpcr to store HID4 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0

@@ -541,7 +693,7 @@ fast_guest_return:
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

@@ -550,13 +702,15 @@ BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)

@@ -570,6 +724,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid

@@ -584,8 +742,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 *	R12		= interrupt vector

@@ -595,6 +753,19 @@ kvmppc_interrupt:
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_HOST_R2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

@@ -620,6 +791,10 @@ BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

@@ -642,10 +817,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst

@@ -696,46 +867,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 * set, we know the host wants us out so let's do it now
 */
do_ext_interrupt:
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	ext_interrupt_to_host

	/* Now read the interrupt from the ICP */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r5, 0
	beq-	ext_interrupt_to_host
	lwzcix	r3, r5, r7
	rlwinm.	r0, r3, 0, 0xffffff
	sync
	beq	3f		/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r0, XICS_IPI
	bne	ext_stash_for_host

	/* It's an IPI, clear the MFRR and EOI it */
	li	r0, 0xff
	li	r6, XICS_MFRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	1f
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* Allright, looks like an IPI for the guest, we need to set MER */
3:
	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)

@@ -764,27 +900,9 @@ do_ext_interrupt:
	mtspr	SPRN_LPCR, r8
	b	fast_guest_return

	/* We raced with the host, we need to resend that IPI, bummer */
1:	li	r0, IPI_PRIORITY
	stbcix	r0, r5, r6		/* set the IPI */
|
||||
sync
|
||||
b ext_interrupt_to_host
|
||||
|
||||
ext_stash_for_host:
|
||||
/* It's not an IPI and it's for the host, stash it in the PACA
|
||||
* before exit, it will be picked up by the host ICP driver
|
||||
*/
|
||||
stw r3, HSTATE_SAVED_XIRR(r13)
|
||||
ext_interrupt_to_host:
|
||||
|
||||
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
|
||||
/* Save DEC */
|
||||
mfspr r5,SPRN_DEC
|
||||
mftb r6
|
||||
extsw r5,r5
|
||||
add r5,r5,r6
|
||||
std r5,VCPU_DEC_EXPIRES(r9)
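
As the "Save DEC" stanza shows, DEC is a down-counter and what gets saved is the absolute expiry point in timebase units, not the remaining count. A one-function hedged C sketch of the extsw/add pair (the function name is illustrative):

	#include <stdint.h>

	/* dec may already have gone negative, hence the sign extension (extsw) */
	static uint64_t dec_expires(int32_t dec, uint64_t tb)
	{
		return tb + (int64_t)dec;
	}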

	/* Save more register state */
	mfdar	r6
	mfdsisr	r7

@@ -954,7 +1072,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,0

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current host timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

@@ -1052,6 +1193,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1:	addi	r8,r8,16
	.endr

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR

@@ -1062,6 +1210,10 @@ BEGIN_FTR_SECTION
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR

@@ -1134,9 +1286,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3

@@ -1158,103 +1314,30 @@ BEGIN_FTR_SECTION
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr
secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	ld	r11,PACA_SLBSHADOWPTR(r13)

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz	r0,VCPU_PTID(r9)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi
	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr
	b	22b

/*
 * Check whether an HDSI is an HPTE not found fault or something else.

@@ -1333,7 +1416,7 @@ fast_interrupt_c_return:
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_NONE
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

@@ -1701,67 +1784,70 @@ machine_check_realmode:
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	ld	r11,PACA_SLBSHADOWPTR(r13)
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr

secondary_nap:
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	lwsync
	/* Clear any pending IPI - assume we're a secondary thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	rlwinm.	r3, r0, 0, 0xffffff
	sync
	li	r0, 0xff
	li	r6, XICS_MFRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync
	beq	1f			/* if nothing pending in the ICP */

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b
	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	li	r3, 1
	bne	42f

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)
	b	1b

43:	/* We raced with the host, we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	b	1b
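
The return-value contract documented above maps onto the branches fairly directly. A hedged C rendering, with the ICP accessors as assumed stand-ins for the lwzcix/stbcix/stwcix real-mode accesses (the XICS_IPI and IPI_PRIORITY values are assumptions for illustration, not quoted from this diff):

	#include <stdint.h>

	extern int host_ipi_pending(void);		/* assumed: reads HSTATE_HOST_IPI */
	extern uint32_t icp_xirr_read(void);		/* assumed: lwzcix from XICS_XIRR */
	extern void icp_xirr_eoi(uint32_t xirr);	/* assumed: stwcix to XICS_XIRR */
	extern void icp_mfrr_write(uint8_t val);	/* assumed: stbcix to XICS_MFRR */
	extern void save_xirr_for_host(uint32_t xirr);	/* assumed: HSTATE_SAVED_XIRR */
	#define XICS_IPI	2			/* assumed value */
	#define IPI_PRIORITY	4			/* assumed value */

	static int read_intr_sketch(void)
	{
		uint32_t xirr, source;

		if (host_ipi_pending())
			return 1;		/* host wants this CPU back */
		xirr = icp_xirr_read();		/* ack whatever the ICP has */
		source = xirr & 0xffffff;
		if (!source)
			return 0;		/* nothing pending */
		if (source != XICS_IPI) {
			save_xirr_for_host(xirr); /* host ICP driver picks it up */
			return 1;
		}
		icp_mfrr_write(0xff);		/* clear the IPI */
		icp_xirr_eoi(xirr);
		if (host_ipi_pending()) {	/* raced with the host */
			icp_mfrr_write(IPI_PRIORITY);	/* resend the IPI */
			return 1;
		}
		return -1;			/* guest wakeup IPI, now cleared */
	}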

/*
 * Save away FP, VMX and VSX registers.

@@ -1879,3 +1965,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

@@ -26,8 +26,12 @@

#if defined(CONFIG_PPC_BOOK3S_64)
#define FUNC(name)		GLUE(.,name)
#define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name)		name
#define GET_SHADOW_VCPU(reg)	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(r2)

#endif /* CONFIG_PPC_BOOK3S_XX */

#define VCPU_LOAD_NVGPRS(vcpu) \

@@ -87,8 +91,14 @@ kvm_start_entry:
	VCPU_LOAD_NVGPRS(r4)

kvm_start_lightweight:
	/* Copy registers into shadow vcpu so we can access them in real mode */
	GET_SHADOW_VCPU(r3)
	bl	FUNC(kvmppc_copy_to_svcpu)
	nop
	REST_GPR(4, r1)

#ifdef CONFIG_PPC_BOOK3S_64
	/* Get the dcbz32 flag */
	PPC_LL	r3, VCPU_HFLAGS(r4)
	rldicl	r3, r3, 0, 63		/* r3 &= 1 */
	stb	r3, HSTATE_RESTORE_HID5(r13)

@@ -111,9 +121,6 @@ kvm_start_lightweight:
	 *
	 */

.global kvmppc_handler_highmem
kvmppc_handler_highmem:

	/*
	 * Register usage at this point:
	 *

@@ -125,18 +132,31 @@ kvmppc_handler_highmem:
	 *
	 */

	/* R7 = vcpu */
	PPC_LL	r7, GPR4(r1)
	/* Transfer reg values from shadow vcpu back to vcpu struct */
	/* On 64-bit, interrupts are still off at this point */
	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
	GET_SHADOW_VCPU(r4)
	bl	FUNC(kvmppc_copy_from_svcpu)
	nop

#ifdef CONFIG_PPC_BOOK3S_64
	/* Re-enable interrupts */
	ld	r3, HSTATE_HOST_MSR(r13)
	ori	r3, r3, MSR_EE
	MTMSR_EERI(r3)

	/*
	 * Reload kernel SPRG3 value.
	 * No need to save guest value as usermode can't modify SPRG3.
	 */
	ld	r3, PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3, r3

#endif /* CONFIG_PPC_BOOK3S_64 */

	/* R7 = vcpu */
	PPC_LL	r7, GPR4(r1)

	PPC_STL	r14, VCPU_GPR(R14)(r7)
	PPC_STL	r15, VCPU_GPR(R15)(r7)
	PPC_STL	r16, VCPU_GPR(R16)(r7)

@@ -161,7 +181,7 @@ kvmppc_handler_highmem:

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	bl	FUNC(kvmppc_handle_exit)
	bl	FUNC(kvmppc_handle_exit_pr)

	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST

@@ -28,7 +28,7 @@
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace.h"
#include "trace_pr.h"

#define PTE_SIZE	12

@@ -56,6 +56,14 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
		       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);
}
#endif

void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;

@@ -83,6 +91,15 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Add to vPTE_64k list */
	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_64k,
			   &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

	vcpu3s->hpte_cache_count++;

	spin_unlock(&vcpu3s->mmu_lock);
}

@@ -113,10 +130,13 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
	hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	vcpu3s->hpte_cache_count--;
	call_rcu(&pte->rcu_head, free_pte_rcu);
}

@@ -219,6 +239,29 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
	rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffffff0ULL;

	list = &vcpu3s->hpte_hash_vpte_64k[
		kvmppc_mmu_hash_vpte_64k(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#endif
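
One invariant worth spelling out: each flush granularity hashes exactly the vpage bits that its flush mask keeps, otherwise kvmppc_mmu_pte_vflush_64k() would search the wrong bucket for entries inserted by kvmppc_mmu_hpte_cache_map(). A hedged sketch with a toy stand-in for the kernel's hash_64() (the HPTEG_HASH_BITS_VPTE_64K value here is an assumption for illustration):

	#include <stdint.h>

	#define HPTEG_HASH_BITS_VPTE_64K 11	/* assumed value */

	/* stand-in for hash_64(); any 64-bit mixing function works for the sketch */
	static uint64_t toy_hash_64(uint64_t v, int bits)
	{
		return (v * 0x9e3779b97f4a7c15ULL) >> (64 - bits);
	}

	/* all sixteen 4k subpages of a 64k page share the same bucket,
	 * because the low 4 vpage bits are masked off before hashing */
	static uint64_t hash_vpte_64k(uint64_t vpage)
	{
		return toy_hash_64((vpage & 0xffffffff0ULL) >> 4,
				   HPTEG_HASH_BITS_VPTE_64K);
	}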

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{

@@ -249,6 +292,11 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case 0xffffffff0ULL:
		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
		break;
#endif
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;

@@ -285,15 +333,19 @@ struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	vcpu3s->hpte_cache_count++;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

	return pte;
}

void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
	kmem_cache_free(hpte_cache, pte);
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

@@ -320,6 +372,10 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

	spin_lock_init(&vcpu3s->mmu_lock);

@@ -40,8 +40,12 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include "trace.h"
#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

@@ -56,29 +60,25 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#define HW_PAGE_SIZE PAGE_SIZE
#endif

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif
	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

@@ -87,7 +87,61 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
	vcpu->cpu = -1;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr  = vcpu->arch.lr;
	svcpu->pc  = vcpu->arch.pc;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr  = svcpu->lr;
	vcpu->arch.pc  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
}
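
These two helpers bracket every trip through real mode, mirroring the kvm_start_lightweight / kvmppc_handler_highmem changes in book3s_interrupts.S above. A hedged sketch of the intended calling pattern (enter_guest_realmode() is a stand-in for the low-level entry path, not a real symbol):

	extern void enter_guest_realmode(struct kvm_vcpu *vcpu);	/* assumed stand-in */

	static void run_guest_once(struct kvm_vcpu *vcpu)
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

		kvmppc_copy_to_svcpu(svcpu, vcpu);	/* state the real-mode code needs */
		svcpu_put(svcpu);

		enter_guest_realmode(vcpu);		/* runs with the MMU off */

		svcpu = svcpu_get(vcpu);
		kvmppc_copy_from_svcpu(vcpu, svcpu);	/* state real mode may have touched */
		svcpu_put(svcpu);
	}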

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

@@ -100,44 +154,69 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
}

/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
				   (memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}
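
A worked instance of the clipping arithmetic above, assuming the usual hva_to_gfn_memslot() definition (base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT)) and 4k pages; all concrete values are made up for illustration:

	/*
	 * memslot:  userspace_addr = 0x10000000, npages = 0x100, base_gfn = 0x400
	 * request:  start = 0x0fffe000, end = 0x10003000
	 * clipped:  hva_start = 0x10000000, hva_end = 0x10003000
	 * gfn     = 0x400 + ((0x10000000 - 0x10000000) >> 12) = 0x400
	 * gfn_end = 0x400 + ((0x10003000 + 0xfff - 0x10000000) >> 12) = 0x403
	 * so gfns 0x400..0x402 are flushed; gfn_end is exclusive, which is why
	 * PAGE_SIZE - 1 is added before converting the end of the range.
	 */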

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the to be unmapped page
	 */
	kvm_flush_remote_tlbs(kvm);
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyways */
	kvm_unmap_hva(kvm, start);
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

@@ -159,7 +238,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
	vcpu->arch.shadow_msr = smsr;
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

@@ -219,7 +298,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

@@ -256,6 +335,23 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

@@ -334,6 +430,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;

@@ -344,10 +441,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;

@@ -355,6 +454,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {

@@ -388,22 +488,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.shared->msr |=
			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
		svcpu_put(svcpu);
			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
		svcpu_put(svcpu);
			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */

@@ -411,12 +507,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */

@@ -619,13 +723,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)

	if (lost_ext & MSR_FP)
		kvmppc_load_up_fpu();
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC)
		kvmppc_load_up_altivec();
#endif
	current->thread.regs->msr |= lost_ext;
}

int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

@@ -643,25 +749,32 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			break;
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif
		svcpu_put(svcpu);

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {

@@ -682,25 +795,36 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		u32 fault_dsisr = svcpu->fault_dsisr;
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			break;
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif
		svcpu_put(svcpu);

		/* The only case we need to handle is missing shadow PTEs */
		if (fault_dsisr & DSISR_NOHPTE) {
		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;

@@ -743,13 +867,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		struct kvmppc_book3s_shadow_vcpu *svcpu;
		ulong flags;

program_interrupt:
		svcpu = svcpu_get(vcpu);
		flags = svcpu->shadow_srr1 & 0x1f0000ull;
		svcpu_put(svcpu);
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG

@@ -798,7 +919,7 @@ program_interrupt:
		ulong cmd = kvmppc_get_gpr(vcpu, 3);
		int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
#ifdef CONFIG_PPC_BOOK3S_64
		if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
			r = RESUME_GUEST;
			break;

@@ -881,9 +1002,7 @@ program_interrupt:
		break;
	default:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		svcpu_put(svcpu);
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);

@@ -920,8 +1039,8 @@ program_interrupt:
	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

@@ -947,13 +1066,13 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);
	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {

@@ -983,7 +1102,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
	return 0;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

@@ -1012,7 +1132,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

@@ -1042,28 +1163,30 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s->shadow_vcpu =
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

@@ -1076,13 +1199,19 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	/*
	 * Default to the same as the host if we're on sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

@@ -1096,24 +1225,31 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
#ifdef CONFIG_KVM_BOOK3S_32
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];

@@ -1222,8 +1358,8 @@ out:
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;

@@ -1258,10 +1394,47 @@ out:
	return r;
}

#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	info->flags = KVM_PPC_1T_SEGMENTS;
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}


#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

@@ -1272,53 +1445,49 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test.  Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[1].page_shift = 24;
	info->sps[1].slb_enc = SLB_VSID_L;
	info->sps[1].enc[0].page_shift = 24;
	info->sps[1].enc[0].pte_enc = 0;
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

int kvmppc_core_init_vm(struct kvm *kvm)
static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif
	mutex_init(&kvm->arch.hpt_mutex);

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);

@@ -1329,7 +1498,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));

@@ -1344,26 +1513,81 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
	}
}

static int kvmppc_book3s_init(void)
static int kvmppc_core_check_processor_compat_pr(void)
{
	/* we are always compatible */
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load   = kvmppc_core_vcpu_load_pr,
	.vcpu_put    = kvmppc_core_vcpu_put_pr,
	.set_msr     = kvmppc_set_msr_pr,
	.vcpu_run    = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free   = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva = kvm_unmap_hva_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva  = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy  = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
};
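
This table is the point of the refactoring visible throughout this file: every former global entry point becomes a static _pr function reached through a struct kvmppc_ops pointer, so the PR and HV implementations can be built as separate modules. A hedged sketch of the dispatch (the kvm->arch.kvm_ops field is an assumption inferred from is_kvmppc_hv_enabled() used in the XICS hunk below; the generic plumbing is outside this diff):

	static void example_set_msr(struct kvm_vcpu *vcpu, u64 msr)
	{
		/* with kvm_ops_pr installed, this lands in kvmppc_set_msr_pr() */
		vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
	}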

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);

	if (r)
	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	r = kvmppc_mmu_hpte_sysinit();
	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

static void kvmppc_book3s_exit(void)
void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);
/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
#endif

@@ -21,6 +21,8 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define HPTE_SIZE	16		/* bytes per HPT entry */

static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

@@ -40,32 +42,41 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
	long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long pteg[2 * 8];
	unsigned long pteg_addr, i, *hpte;
	long int ret;

	i = pte_index & 7;
	pte_index &= ~7UL;
	pteg_addr = get_pteg_addr(vcpu, pte_index);

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
	hpte = pteg;

	ret = H_PTEG_FULL;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		for (i = 0; ; ++i) {
			if (i == 8)
				return H_PTEG_FULL;
				goto done;
			if ((*hpte & HPTE_V_VALID) == 0)
				break;
			hpte += 2;
		}
	} else {
		i = kvmppc_get_gpr(vcpu, 5) & 7UL;
		hpte += i * 2;
		if (*hpte & HPTE_V_VALID)
			goto done;
	}

	hpte[0] = kvmppc_get_gpr(vcpu, 6);
	hpte[1] = kvmppc_get_gpr(vcpu, 7);
	copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg));
	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
	pteg_addr += i * HPTE_SIZE;
	copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
	kvmppc_set_gpr(vcpu, 4, pte_index | i);
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
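
The reworked H_ENTER above implements the two PAPR slot-allocation modes; restated as a hedged C sketch (HPTE_V_VALID and H_EXACT as in the code above, the helper name is illustrative):

	/* Without H_EXACT, scan the 8-entry PTEG for a free slot; with
	 * H_EXACT, the low 3 bits of the requested index name the one slot
	 * that must be free. Returns the slot index, or -1 for H_PTEG_FULL. */
	static int pick_hpte_slot(unsigned long pteg[16], unsigned long flags,
				  long pte_index)
	{
		int i;

		if (!(flags & H_EXACT)) {
			for (i = 0; i < 8; i++)
				if (!(pteg[2 * i] & HPTE_V_VALID))
					return i;	/* first invalid entry wins */
			return -1;
		}
		i = pte_index & 7;
		return (pteg[2 * i] & HPTE_V_VALID) ? -1 : i;
	}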

@@ -77,26 +88,31 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long v = 0, pteg, rb;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	copy_from_user(pte, (void __user *)pteg, sizeof(pte));

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) {
		kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
		return EMULATE_DONE;
	}
	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
		goto done;

	copy_to_user((void __user *)pteg, &v, sizeof(v));

	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
	ret = H_SUCCESS;
	kvmppc_set_gpr(vcpu, 4, pte[0]);
	kvmppc_set_gpr(vcpu, 5, pte[1]);

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

@@ -124,6 +140,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
	int paramnr = 4;
	int ret = H_SUCCESS;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);

@@ -172,6 +189,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
		}
		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
	}
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;

@@ -184,15 +202,16 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long rb, pteg, r, v;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	copy_from_user(pte, (void __user *)pteg, sizeof(pte));

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) {
		kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
		return EMULATE_DONE;
	}
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
		goto done;

	v = pte[0];
	r = pte[1];

@@ -207,8 +226,11 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
	rb = compute_tlbie_rb(v, r, pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
	copy_to_user((void __user *)pteg, pte, sizeof(pte));
	ret = H_SUCCESS;

	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

@@ -38,32 +38,6 @@

#define FUNC(name)		GLUE(.,name)

.globl kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	rfid
	b	.

.globl kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

#elif defined(CONFIG_PPC_BOOK3S_32)

#define FUNC(name)		name

@@ -179,11 +153,15 @@ _GLOBAL(kvmppc_entry_trampoline)

	li	r6, MSR_IR | MSR_DR
	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
#ifdef CONFIG_PPC_BOOK3S_32
	/*
	 * Set EE in HOST_MSR so that it's enabled when we get into our
	 * C exit handler function
	 * C exit handler function. On 64-bit we delay enabling
	 * interrupts until we have finished transferring stuff
	 * to or from the PACA.
	 */
	ori	r5, r5, MSR_EE
#endif
	mtsrr0	r7
	mtsrr1	r6
	RFI
@@ -260,6 +260,7 @@ fail:
*/
return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall);

void kvmppc_rtas_tokens_free(struct kvm *kvm)
{

@@ -161,8 +161,8 @@ kvmppc_handler_trampoline_enter_end:
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:

.global kvmppc_interrupt
kvmppc_interrupt:
.global kvmppc_interrupt_pr
kvmppc_interrupt_pr:

/* Register usage at this point:
*

@@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
}

/* Check for real mode returning too hard */
if (xics->real_mode)
if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
return kvmppc_xics_rm_complete(vcpu, req);

switch (req) {

@@ -840,6 +840,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)

return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);


/* -- Initialisation code etc. -- */

@@ -1250,13 +1251,13 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)

xics_debugfs_init(xics);

#ifdef CONFIG_KVM_BOOK3S_64_HV
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
/* Enable real mode support */
xics->real_mode = ENABLE_REALMODE;
xics->real_mode_dbg = DEBUG_REALMODE;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

return 0;
}

@@ -40,7 +40,9 @@

#include "timing.h"
#include "booke.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

@@ -133,6 +135,29 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
vcpu->arch.shadow_msr &= ~MSR_DE;
vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

/* Force enable debug interrupts when user space wants to debug */
if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
/*
* Since there is no shadow MSR, sync MSR_DE into the guest
* visible MSR.
*/
vcpu->arch.shared->msr |= MSR_DE;
#else
vcpu->arch.shadow_msr |= MSR_DE;
vcpu->arch.shared->msr &= ~MSR_DE;
#endif
}
}

/*
* Helper function for "full" MSR writes. No need to call this if only
* EE/CE/ME/DE/RI are changing.

@@ -150,6 +175,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
kvmppc_mmu_msr_notify(vcpu, old_msr);
kvmppc_vcpu_sync_spe(vcpu);
kvmppc_vcpu_sync_fpu(vcpu);
kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,

@@ -655,6 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret, s;
struct thread_struct thread;
#ifdef CONFIG_PPC_FPU
unsigned int fpscr;
int fpexc_mode;

@@ -696,6 +723,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
#endif

/* Switch to guest debug context */
thread.debug = vcpu->arch.shadow_dbg_reg;
switch_booke_debug_regs(&thread);
thread.debug = current->thread.debug;
current->thread.debug = vcpu->arch.shadow_dbg_reg;

kvmppc_fix_ee_before_entry();

ret = __kvmppc_vcpu_run(kvm_run, vcpu);

@@ -703,6 +736,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* No need for kvm_guest_exit. It's done in handle_exit.
We also get here with interrupts enabled. */

/* Switch back to user space debug context */
switch_booke_debug_regs(&thread);
current->thread.debug = thread.debug;

#ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);

@@ -758,6 +795,30 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
}

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
u32 dbsr = vcpu->arch.dbsr;

run->debug.arch.status = 0;
run->debug.arch.address = vcpu->arch.pc;

if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
} else {
if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
run->debug.arch.address = dbg_reg->dac1;
else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
run->debug.arch.address = dbg_reg->dac2;
}

return RESUME_HOST;
}

static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
ulong r1, ip, msr, lr;

@@ -818,6 +879,11 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
case BOOKE_INTERRUPT_CRITICAL:
unknown_exception(&regs);
break;
case BOOKE_INTERRUPT_DEBUG:
/* Save DBSR before preemption is enabled */
vcpu->arch.dbsr = mfspr(SPRN_DBSR);
kvmppc_clear_dbsr();
break;
}
}

@@ -1135,18 +1201,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}

case BOOKE_INTERRUPT_DEBUG: {
u32 dbsr;

vcpu->arch.pc = mfspr(SPRN_CSRR0);

/* clear IAC events in DBSR register */
dbsr = mfspr(SPRN_DBSR);
dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
mtspr(SPRN_DBSR, dbsr);

run->exit_reason = KVM_EXIT_DEBUG;
r = kvmppc_handle_debug(run, vcpu);
if (r == RESUME_HOST)
run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
r = RESUME_HOST;
break;
}

@@ -1197,7 +1255,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
vcpu->arch.shadow_pid = 1;
vcpu->arch.shared->msr = 0;
#endif

@@ -1359,7 +1417,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu,
return 0;
}

void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
sregs->u.e.features |= KVM_SREGS_E_IVOR;

@@ -1379,6 +1437,7 @@ void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)

@@ -1413,8 +1472,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,

get_sregs_base(vcpu, sregs);
get_sregs_arch206(vcpu, sregs);
kvmppc_core_get_sregs(vcpu, sregs);
return 0;
return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,

@@ -1433,7 +1491,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
if (ret < 0)
return ret;

return kvmppc_core_set_sregs(vcpu, sregs);
return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)

@@ -1441,7 +1499,6 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
int r = 0;
union kvmppc_one_reg val;
int size;
long int i;

size = one_reg_size(reg->id);
if (size > sizeof(val))

@@ -1449,16 +1506,24 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)

switch (reg->id) {
case KVM_REG_PPC_IAC1:
case KVM_REG_PPC_IAC2:
case KVM_REG_PPC_IAC3:
case KVM_REG_PPC_IAC4:
i = reg->id - KVM_REG_PPC_IAC1;
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac[i]);
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
break;
case KVM_REG_PPC_IAC2:
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case KVM_REG_PPC_IAC3:
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
break;
case KVM_REG_PPC_IAC4:
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
break;
#endif
case KVM_REG_PPC_DAC1:
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
break;
case KVM_REG_PPC_DAC2:
i = reg->id - KVM_REG_PPC_DAC1;
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac[i]);
val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
break;
case KVM_REG_PPC_EPR: {
u32 epr = get_guest_epr(vcpu);

@@ -1477,10 +1542,13 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
val = get_reg_val(reg->id, vcpu->arch.tsr);
break;
case KVM_REG_PPC_DEBUG_INST:
val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV);
val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
break;
case KVM_REG_PPC_VRSAVE:
val = get_reg_val(reg->id, vcpu->arch.vrsave);
break;
default:
r = kvmppc_get_one_reg(vcpu, reg->id, &val);
r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
break;
}

@@ -1498,7 +1566,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
int r = 0;
union kvmppc_one_reg val;
int size;
long int i;

size = one_reg_size(reg->id);
if (size > sizeof(val))

@@ -1509,16 +1576,24 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)

switch (reg->id) {
case KVM_REG_PPC_IAC1:
case KVM_REG_PPC_IAC2:
case KVM_REG_PPC_IAC3:
case KVM_REG_PPC_IAC4:
i = reg->id - KVM_REG_PPC_IAC1;
vcpu->arch.dbg_reg.iac[i] = set_reg_val(reg->id, val);
vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_IAC2:
vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case KVM_REG_PPC_IAC3:
vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_IAC4:
vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
break;
#endif
case KVM_REG_PPC_DAC1:
vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_DAC2:
i = reg->id - KVM_REG_PPC_DAC1;
vcpu->arch.dbg_reg.dac[i] = set_reg_val(reg->id, val);
vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_EPR: {
u32 new_epr = set_reg_val(reg->id, val);

@@ -1552,20 +1627,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
kvmppc_set_tcr(vcpu, tcr);
break;
}
case KVM_REG_PPC_VRSAVE:
vcpu->arch.vrsave = set_reg_val(reg->id, val);
break;
default:
r = kvmppc_set_one_reg(vcpu, reg->id, &val);
r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
break;
}

return r;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -ENOTSUPP;

@@ -1590,12 +1662,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages)
{
return 0;

@@ -1671,6 +1743,157 @@ void kvmppc_decrementer_func(unsigned long data)
kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
uint64_t addr, int index)
{
switch (index) {
case 0:
dbg_reg->dbcr0 |= DBCR0_IAC1;
dbg_reg->iac1 = addr;
break;
case 1:
dbg_reg->dbcr0 |= DBCR0_IAC2;
dbg_reg->iac2 = addr;
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case 2:
dbg_reg->dbcr0 |= DBCR0_IAC3;
dbg_reg->iac3 = addr;
break;
case 3:
dbg_reg->dbcr0 |= DBCR0_IAC4;
dbg_reg->iac4 = addr;
break;
#endif
default:
return -EINVAL;
}

dbg_reg->dbcr0 |= DBCR0_IDM;
return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
int type, int index)
{
switch (index) {
case 0:
if (type & KVMPPC_DEBUG_WATCH_READ)
dbg_reg->dbcr0 |= DBCR0_DAC1R;
if (type & KVMPPC_DEBUG_WATCH_WRITE)
dbg_reg->dbcr0 |= DBCR0_DAC1W;
dbg_reg->dac1 = addr;
break;
case 1:
if (type & KVMPPC_DEBUG_WATCH_READ)
dbg_reg->dbcr0 |= DBCR0_DAC2R;
if (type & KVMPPC_DEBUG_WATCH_WRITE)
dbg_reg->dbcr0 |= DBCR0_DAC2W;
dbg_reg->dac2 = addr;
break;
default:
return -EINVAL;
}

dbg_reg->dbcr0 |= DBCR0_IDM;
return 0;
}
void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
if (set) {
if (prot_bitmap & MSR_UCLE)
vcpu->arch.shadow_msrp |= MSRP_UCLEP;
if (prot_bitmap & MSR_DE)
vcpu->arch.shadow_msrp |= MSRP_DEP;
if (prot_bitmap & MSR_PMM)
vcpu->arch.shadow_msrp |= MSRP_PMMP;
} else {
if (prot_bitmap & MSR_UCLE)
vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
if (prot_bitmap & MSR_DE)
vcpu->arch.shadow_msrp &= ~MSRP_DEP;
if (prot_bitmap & MSR_PMM)
vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
}
#endif
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
struct debug_reg *dbg_reg;
int n, b = 0, w = 0;

if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
vcpu->guest_debug = 0;
kvm_guest_protect_msr(vcpu, MSR_DE, false);
return 0;
}

kvm_guest_protect_msr(vcpu, MSR_DE, true);
vcpu->guest_debug = dbg->control;
vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
/* Set DBCR0_EDM in guest visible DBCR0 register. */
vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;

if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

/* Code below handles only HW breakpoints */
dbg_reg = &(vcpu->arch.shadow_dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
/*
* On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
* DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
*/
dbg_reg->dbcr1 = 0;
dbg_reg->dbcr2 = 0;
#else
/*
* On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
* We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
* is set.
*/
dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
DBCR1_IAC4US;
dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
return 0;

for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
uint64_t addr = dbg->arch.bp[n].addr;
uint32_t type = dbg->arch.bp[n].type;

if (type == KVMPPC_DEBUG_NONE)
continue;

if (type & !(KVMPPC_DEBUG_WATCH_READ |
KVMPPC_DEBUG_WATCH_WRITE |
KVMPPC_DEBUG_BREAKPOINT))
return -EINVAL;

if (type & KVMPPC_DEBUG_BREAKPOINT) {
/* Setting H/W breakpoint */
if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
return -EINVAL;
} else {
/* Setting H/W watchpoint */
if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
type, w++))
return -EINVAL;
}
}

return 0;
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
vcpu->cpu = smp_processor_id();

@@ -1681,6 +1904,44 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
current->thread.kvm_vcpu = NULL;
vcpu->cpu = -1;

/* Clear pending debug event in DBSR */
kvmppc_clear_dbsr();
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

int __init kvmppc_booke_init(void)

@@ -99,6 +99,30 @@ enum int_class {

void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);

extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val);
extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn,
ulong *spr_val);
extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong *spr_val);
extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong *spr_val);

/*
* Load up guest vcpu FP state if it's needed.
* It also set the MSR_FP in thread so that host know

@@ -129,4 +153,9 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
giveup_fpu(current);
#endif
}

static inline void kvmppc_clear_dbsr(void)
{
mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
}
#endif /* __KVM_BOOKE_H__ */

@@ -305,7 +305,7 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
kvmppc_booke_vcpu_load(vcpu, cpu);

@@ -313,7 +313,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
if (vcpu->arch.shadow_msr & MSR_SPE)

@@ -367,7 +367,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}

void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

@@ -388,9 +389,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)

kvmppc_get_sregs_ivor(vcpu, sregs);
kvmppc_get_sregs_e500_tlb(vcpu, sregs);
return 0;
}

int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int ret;

@@ -425,21 +428,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return kvmppc_set_sregs_ivor(vcpu, sregs);
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
return r;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
struct kvm_vcpu *vcpu;

@@ -481,7 +485,7 @@ out:
return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

@@ -492,15 +496,32 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}

int kvmppc_core_init_vm(struct kvm *kvm)
static int kvmppc_core_init_vm_e500(struct kvm *kvm)
{
return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
{
}

static struct kvmppc_ops kvm_ops_e500 = {
.get_sregs = kvmppc_core_get_sregs_e500,
.set_sregs = kvmppc_core_set_sregs_e500,
.get_one_reg = kvmppc_get_one_reg_e500,
.set_one_reg = kvmppc_set_one_reg_e500,
.vcpu_load = kvmppc_core_vcpu_load_e500,
.vcpu_put = kvmppc_core_vcpu_put_e500,
.vcpu_create = kvmppc_core_vcpu_create_e500,
.vcpu_free = kvmppc_core_vcpu_free_e500,
.mmu_destroy = kvmppc_mmu_destroy_e500,
.init_vm = kvmppc_core_init_vm_e500,
.destroy_vm = kvmppc_core_destroy_vm_e500,
.emulate_op = kvmppc_core_emulate_op_e500,
.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};

static int __init kvmppc_e500_init(void)
{
int r, i;

|
|||
|
||||
r = kvmppc_core_check_processor_compat();
|
||||
if (r)
|
||||
return r;
|
||||
goto err_out;
|
||||
|
||||
r = kvmppc_booke_init();
|
||||
if (r)
|
||||
return r;
|
||||
goto err_out;
|
||||
|
||||
/* copy extra E500 exception handlers */
|
||||
ivor[0] = mfspr(SPRN_IVOR32);
|
||||
|
@ -534,11 +555,19 @@ static int __init kvmppc_e500_init(void)
|
|||
flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
|
||||
ivor[max_ivor] + handler_len);
|
||||
|
||||
return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
|
||||
r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
|
||||
if (r)
|
||||
goto err_out;
|
||||
kvm_ops_e500.owner = THIS_MODULE;
|
||||
kvmppc_pr_ops = &kvm_ops_e500;
|
||||
|
||||
err_out:
|
||||
return r;
|
||||
}
|
||||
|
||||
static void __exit kvmppc_e500_exit(void)
|
||||
{
|
||||
kvmppc_pr_ops = NULL;
|
||||
kvmppc_booke_exit();
|
||||
}
|
||||
|
||||
|
|
|
@@ -117,7 +117,7 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
(MAS2_X0 | MAS2_X1)
(MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
#define MAS3_ATTRIB_MASK \
(MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
| E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)

@@ -26,6 +26,7 @@
#define XOP_TLBRE 946
#define XOP_TLBWE 978
#define XOP_TLBILX 18
#define XOP_EHPRIV 270

#ifdef CONFIG_KVM_E500MC
static int dbell2prio(ulong param)

@@ -82,8 +83,28 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
}
#endif

int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;

switch (get_oc(inst)) {
case EHPRIV_OC_DEBUG:
run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.address = vcpu->arch.pc;
run->debug.arch.status = 0;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
emulated = EMULATE_EXIT_USER;
*advance = 0;
break;
default:
emulated = EMULATE_FAIL;
}
return emulated;
}

int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
int ra = get_ra(inst);

@@ -130,6 +151,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
break;

case XOP_EHPRIV:
emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
advance);
break;

default:
emulated = EMULATE_FAIL;
}

@@ -146,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;

@@ -237,7 +263,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
return emulated;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;

@@ -32,7 +32,7 @@
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "trace_booke.h"
#include "timing.h"
#include "e500_mmu_host.h"

@@ -536,7 +536,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
{
}

@@ -32,10 +32,11 @@
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"

#include "trace_booke.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

@@ -253,6 +254,9 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
ref->pfn = pfn;
ref->flags |= E500_TLB_VALID;

/* Mark the page accessed */
kvm_set_pfn_accessed(pfn);

if (tlbe_is_writable(gtlbe))
kvm_set_pfn_dirty(pfn);
}

@@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

@@ -147,7 +147,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvmppc_load_guest_fp(vcpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
{
vcpu->arch.eplc = mfspr(SPRN_EPLC);
vcpu->arch.epsc = mfspr(SPRN_EPSC);

@@ -204,7 +204,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}

void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

@@ -224,10 +225,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];

kvmppc_get_sregs_ivor(vcpu, sregs);
return kvmppc_get_sregs_ivor(vcpu, sregs);
}

int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int ret;

@@ -260,21 +262,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return kvmppc_set_sregs_ivor(vcpu, sregs);
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
return r;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
unsigned int id)
{
struct kvmppc_vcpu_e500 *vcpu_e500;
struct kvm_vcpu *vcpu;

@@ -315,7 +318,7 @@ out:
return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

@@ -325,7 +328,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}

int kvmppc_core_init_vm(struct kvm *kvm)
static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
{
int lpid;

@@ -337,27 +340,52 @@ int kvmppc_core_init_vm(struct kvm *kvm)
return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
{
kvmppc_free_lpid(kvm->arch.lpid);
}

static struct kvmppc_ops kvm_ops_e500mc = {
.get_sregs = kvmppc_core_get_sregs_e500mc,
.set_sregs = kvmppc_core_set_sregs_e500mc,
.get_one_reg = kvmppc_get_one_reg_e500mc,
.set_one_reg = kvmppc_set_one_reg_e500mc,
.vcpu_load = kvmppc_core_vcpu_load_e500mc,
.vcpu_put = kvmppc_core_vcpu_put_e500mc,
.vcpu_create = kvmppc_core_vcpu_create_e500mc,
.vcpu_free = kvmppc_core_vcpu_free_e500mc,
.mmu_destroy = kvmppc_mmu_destroy_e500,
.init_vm = kvmppc_core_init_vm_e500mc,
.destroy_vm = kvmppc_core_destroy_vm_e500mc,
.emulate_op = kvmppc_core_emulate_op_e500,
.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};

static int __init kvmppc_e500mc_init(void)
{
int r;

r = kvmppc_booke_init();
if (r)
return r;
goto err_out;

kvmppc_init_lpid(64);
kvmppc_claim_lpid(0); /* host */

return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
if (r)
goto err_out;
kvm_ops_e500mc.owner = THIS_MODULE;
kvmppc_pr_ops = &kvm_ops_e500mc;

err_out:
return r;
}

static void __exit kvmppc_e500mc_exit(void)
{
kvmppc_pr_ops = NULL;
kvmppc_booke_exit();
}

@@ -130,8 +130,8 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_PIR: break;

default:
emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
spr_val);
emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
spr_val);
if (emulated == EMULATE_FAIL)
printk(KERN_INFO "mtspr: unknown spr "
"0x%x\n", sprn);

@@ -191,8 +191,8 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
spr_val = kvmppc_get_dec(vcpu, get_tb());
break;
default:
emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
&spr_val);
emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
&spr_val);
if (unlikely(emulated == EMULATE_FAIL)) {
printk(KERN_INFO "mfspr: unknown spr "
"0x%x\n", sprn);

@@ -464,7 +464,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}

if (emulated == EMULATE_FAIL) {
emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
&advance);
if (emulated == EMULATE_AGAIN) {
advance = 0;
} else if (emulated == EMULATE_FAIL) {

@@ -483,3 +484,4 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)

return emulated;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);

@@ -26,6 +26,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>

@@ -39,6 +40,12 @@
#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
return !!(v->arch.pending_exceptions) ||

@@ -50,7 +57,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
* Common checks before entering the guest world. Call with interrupts
* disabled.

@@ -125,7 +131,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)

return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{

@@ -179,6 +185,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)

return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{

@@ -192,11 +199,9 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
/* HV KVM can only do PAPR mode for now */
if (!vcpu->arch.papr_enabled)
if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
if (!cpu_has_feature(CPU_FTR_EMB_HV))

@@ -209,6 +214,7 @@ out:
vcpu->arch.sane = r;
return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{

@@ -243,6 +249,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)

return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvm_arch_hardware_enable(void *garbage)
{

@@ -269,10 +276,35 @@ void kvm_arch_check_processor_compat(void *rtn)

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
if (type)
return -EINVAL;
struct kvmppc_ops *kvm_ops = NULL;
/*
* if we have both HV and PR enabled, default is HV
*/
if (type == 0) {
if (kvmppc_hv_ops)
kvm_ops = kvmppc_hv_ops;
else
kvm_ops = kvmppc_pr_ops;
if (!kvm_ops)
goto err_out;
} else if (type == KVM_VM_PPC_HV) {
if (!kvmppc_hv_ops)
goto err_out;
kvm_ops = kvmppc_hv_ops;
} else if (type == KVM_VM_PPC_PR) {
if (!kvmppc_pr_ops)
goto err_out;
kvm_ops = kvmppc_pr_ops;
} else
goto err_out;

if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
return -ENOENT;

kvm->arch.kvm_ops = kvm_ops;
return kvmppc_core_init_vm(kvm);
err_out:
return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)

@@ -292,6 +324,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvmppc_core_destroy_vm(kvm);

mutex_unlock(&kvm->lock);

/* drop the module reference */
module_put(kvm->arch.kvm_ops->owner);
}

void kvm_arch_sync_events(struct kvm *kvm)

@@ -301,6 +336,10 @@ void kvm_arch_sync_events(struct kvm *kvm)
int kvm_dev_ioctl_check_extension(long ext)
{
int r;
/* FIXME!!
* Should some of this be vm ioctl ? is it possible now ?
*/
int hv_enabled = kvmppc_hv_ops ? 1 : 0;

switch (ext) {
#ifdef CONFIG_BOOKE

@@ -320,22 +359,26 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_DEVICE_CTRL:
r = 1;
break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB:
#endif
#ifdef CONFIG_KVM_MPIC
case KVM_CAP_IRQ_MPIC:
#endif
r = 1;
/* We support this only for PR */
r = !hv_enabled;
break;
#ifdef CONFIG_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
#endif
#ifdef CONFIG_KVM_MPIC
case KVM_CAP_IRQ_MPIC:
r = 1;
break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_PPC_ALLOC_HTAB:

@@ -346,32 +389,37 @@ int kvm_dev_ioctl_check_extension(long ext)
r = 1;
break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_SMT:
r = threads_per_core;
if (hv_enabled)
r = threads_per_core;
else
r = 0;
break;
case KVM_CAP_PPC_RMA:
r = 1;
r = hv_enabled;
/* PPC970 requires an RMA */
if (cpu_has_feature(CPU_FTR_ARCH_201))
if (r && cpu_has_feature(CPU_FTR_ARCH_201))
r = 2;
break;
#endif
case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
if (hv_enabled)
r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
else
r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
r = 1;
#else
r = 0;
break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_HTAB_FD:
r = 1;
r = hv_enabled;
break;
#endif
break;
case KVM_CAP_NR_VCPUS:
/*
* Recommending a number of CPUs is somewhat arbitrary; we

@@ -379,11 +427,10 @@ int kvm_dev_ioctl_check_extension(long ext)
* will have secondary threads "offline"), and for other KVM
* implementations just count online CPUs.
*/
#ifdef CONFIG_KVM_BOOK3S_64_HV
r = num_present_cpus();
#else
r = num_online_cpus();
#endif
if (hv_enabled)
r = num_present_cpus();
else
r = num_online_cpus();
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;

@@ -407,15 +454,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
kvmppc_core_free_memslot(free, dont);
kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned long npages)
{
return kvmppc_core_create_memslot(slot, npages);
return kvmppc_core_create_memslot(kvm, slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)

@@ -659,6 +707,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,

return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,

@@ -720,6 +769,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,

return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{

@@ -1024,52 +1074,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
goto out;
}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_ALLOCATE_RMA: {
struct kvm_allocate_rma rma;
struct kvm *kvm = filp->private_data;

r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
r = -EFAULT;
break;
}

case KVM_PPC_ALLOCATE_HTAB: {
u32 htab_order;

r = -EFAULT;
if (get_user(htab_order, (u32 __user *)argp))
break;
r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
if (r)
break;
r = -EFAULT;
if (put_user(htab_order, (u32 __user *)argp))
break;
r = 0;
break;
}

case KVM_PPC_GET_HTAB_FD: {
struct kvm_get_htab_fd ghf;

r = -EFAULT;
if (copy_from_user(&ghf, argp, sizeof(ghf)))
break;
r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
break;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
case KVM_PPC_GET_SMMU_INFO: {
struct kvm_ppc_smmu_info info;
struct kvm *kvm = filp->private_data;

memset(&info, 0, sizeof(info));
r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
r = -EFAULT;
break;

@@ -1080,11 +1090,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
break;
}
#endif /* CONFIG_PPC_BOOK3S_64 */
default: {
struct kvm *kvm = filp->private_data;
r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
}
#else /* CONFIG_PPC_BOOK3S_64 */
default:
r = -ENOTTY;
#endif
}

out:
return r;
}

@@ -1106,22 +1120,26 @@ long kvmppc_alloc_lpid(void)

return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{

@@ -1130,4 +1148,5 @@ int kvm_arch_init(void *opaque)

void kvm_arch_exit(void)
{

}

@@ -31,126 +31,6 @@ TRACE_EVENT(kvm_ppc_instr,
__entry->inst, __entry->pc, __entry->emulate)
);

#ifdef CONFIG_PPC_BOOK3S
#define kvm_trace_symbol_exit \
{0x100, "SYSTEM_RESET"}, \
{0x200, "MACHINE_CHECK"}, \
{0x300, "DATA_STORAGE"}, \
{0x380, "DATA_SEGMENT"}, \
{0x400, "INST_STORAGE"}, \
{0x480, "INST_SEGMENT"}, \
{0x500, "EXTERNAL"}, \
{0x501, "EXTERNAL_LEVEL"}, \
{0x502, "EXTERNAL_HV"}, \
{0x600, "ALIGNMENT"}, \
{0x700, "PROGRAM"}, \
{0x800, "FP_UNAVAIL"}, \
{0x900, "DECREMENTER"}, \
{0x980, "HV_DECREMENTER"}, \
{0xc00, "SYSCALL"}, \
{0xd00, "TRACE"}, \
{0xe00, "H_DATA_STORAGE"}, \
{0xe20, "H_INST_STORAGE"}, \
{0xe40, "H_EMUL_ASSIST"}, \
{0xf00, "PERFMON"}, \
{0xf20, "ALTIVEC"}, \
{0xf40, "VSX"}
#else
#define kvm_trace_symbol_exit \
{0, "CRITICAL"}, \
{1, "MACHINE_CHECK"}, \
{2, "DATA_STORAGE"}, \
{3, "INST_STORAGE"}, \
{4, "EXTERNAL"}, \
{5, "ALIGNMENT"}, \
{6, "PROGRAM"}, \
{7, "FP_UNAVAIL"}, \
{8, "SYSCALL"}, \
{9, "AP_UNAVAIL"}, \
{10, "DECREMENTER"}, \
{11, "FIT"}, \
{12, "WATCHDOG"}, \
{13, "DTLB_MISS"}, \
{14, "ITLB_MISS"}, \
{15, "DEBUG"}, \
{32, "SPE_UNAVAIL"}, \
{33, "SPE_FP_DATA"}, \
{34, "SPE_FP_ROUND"}, \
{35, "PERFORMANCE_MONITOR"}, \
{36, "DOORBELL"}, \
{37, "DOORBELL_CRITICAL"}, \
{38, "GUEST_DBELL"}, \
{39, "GUEST_DBELL_CRIT"}, \
{40, "HV_SYSCALL"}, \
{41, "HV_PRIV"}
#endif

TRACE_EVENT(kvm_exit,
TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
TP_ARGS(exit_nr, vcpu),

TP_STRUCT__entry(
__field( unsigned int, exit_nr )
__field( unsigned long, pc )
__field( unsigned long, msr )
__field( unsigned long, dar )
#ifdef CONFIG_KVM_BOOK3S_PR
__field( unsigned long, srr1 )
#endif
__field( unsigned long, last_inst )
),

TP_fast_assign(
#ifdef CONFIG_KVM_BOOK3S_PR
struct kvmppc_book3s_shadow_vcpu *svcpu;
#endif
__entry->exit_nr = exit_nr;
__entry->pc = kvmppc_get_pc(vcpu);
__entry->dar = kvmppc_get_fault_dar(vcpu);
__entry->msr = vcpu->arch.shared->msr;
#ifdef CONFIG_KVM_BOOK3S_PR
svcpu = svcpu_get(vcpu);
__entry->srr1 = svcpu->shadow_srr1;
svcpu_put(svcpu);
#endif
__entry->last_inst = vcpu->arch.last_inst;
),

TP_printk("exit=%s"
" | pc=0x%lx"
" | msr=0x%lx"
" | dar=0x%lx"
#ifdef CONFIG_KVM_BOOK3S_PR
" | srr1=0x%lx"
#endif
" | last_inst=0x%lx"
,
__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
__entry->pc,
__entry->msr,
__entry->dar,
#ifdef CONFIG_KVM_BOOK3S_PR
__entry->srr1,
#endif
__entry->last_inst
)
);

TRACE_EVENT(kvm_unmap_hva,
TP_PROTO(unsigned long hva),
TP_ARGS(hva),

TP_STRUCT__entry(
__field( unsigned long, hva )
),

TP_fast_assign(
__entry->hva = hva;
),

TP_printk("unmap hva 0x%lx\n", __entry->hva)
);

TRACE_EVENT(kvm_stlb_inval,
TP_PROTO(unsigned int stlb_index),
TP_ARGS(stlb_index),

@@ -236,315 +116,6 @@ TRACE_EVENT(kvm_check_requests,
__entry->cpu_nr, __entry->requests)
);


/*************************************************************************
* Book3S trace points *
*************************************************************************/

#ifdef CONFIG_KVM_BOOK3S_PR

TRACE_EVENT(kvm_book3s_reenter,
TP_PROTO(int r, struct kvm_vcpu *vcpu),
TP_ARGS(r, vcpu),

TP_STRUCT__entry(
__field( unsigned int, r )
__field( unsigned long, pc )
),

TP_fast_assign(
__entry->r = r;
__entry->pc = kvmppc_get_pc(vcpu);
),

TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
);

#ifdef CONFIG_PPC_BOOK3S_64

TRACE_EVENT(kvm_book3s_64_mmu_map,
TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
struct kvmppc_pte *orig_pte),
TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),

TP_STRUCT__entry(
__field( unsigned char, flag_w )
__field( unsigned char, flag_x )
__field( unsigned long, eaddr )
__field( unsigned long, hpteg )
__field( unsigned long, va )
__field( unsigned long long, vpage )
__field( unsigned long, hpaddr )
),

TP_fast_assign(
__entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
__entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
__entry->eaddr = orig_pte->eaddr;
__entry->hpteg = hpteg;
__entry->va = va;
__entry->vpage = orig_pte->vpage;
__entry->hpaddr = hpaddr;
),

TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
__entry->flag_w, __entry->flag_x, __entry->eaddr,
__entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
);

#endif /* CONFIG_PPC_BOOK3S_64 */

TRACE_EVENT(kvm_book3s_mmu_map,
TP_PROTO(struct hpte_cache *pte),
TP_ARGS(pte),

TP_STRUCT__entry(
__field( u64, host_vpn )
__field( u64, pfn )
__field( ulong, eaddr )
__field( u64, vpage )
__field( ulong, raddr )
__field( int, flags )
),

TP_fast_assign(
__entry->host_vpn = pte->host_vpn;
__entry->pfn = pte->pfn;
__entry->eaddr = pte->pte.eaddr;
__entry->vpage = pte->pte.vpage;
__entry->raddr = pte->pte.raddr;
__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
(pte->pte.may_write ? 0x2 : 0) |
(pte->pte.may_execute ? 0x1 : 0);
),

TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
__entry->host_vpn, __entry->pfn, __entry->eaddr,
__entry->vpage, __entry->raddr, __entry->flags)
);

TRACE_EVENT(kvm_book3s_mmu_invalidate,
TP_PROTO(struct hpte_cache *pte),
TP_ARGS(pte),

TP_STRUCT__entry(
__field( u64, host_vpn )
__field( u64, pfn )
__field( ulong, eaddr )
__field( u64, vpage )
__field( ulong, raddr )
__field( int, flags )
),

TP_fast_assign(
__entry->host_vpn = pte->host_vpn;
__entry->pfn = pte->pfn;
__entry->eaddr = pte->pte.eaddr;
__entry->vpage = pte->pte.vpage;
__entry->raddr = pte->pte.raddr;
__entry->flags = (pte->pte.may_read ? 0x4 : 0) |
(pte->pte.may_write ? 0x2 : 0) |
(pte->pte.may_execute ? 0x1 : 0);
),

TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
__entry->host_vpn, __entry->pfn, __entry->eaddr,
__entry->vpage, __entry->raddr, __entry->flags)
);

TRACE_EVENT(kvm_book3s_mmu_flush,
TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
unsigned long long p2),
TP_ARGS(type, vcpu, p1, p2),

TP_STRUCT__entry(
__field( int, count )
__field( unsigned long long, p1 )
__field( unsigned long long, p2 )
__field( const char *, type )
),

TP_fast_assign(
__entry->count = to_book3s(vcpu)->hpte_cache_count;
__entry->p1 = p1;
__entry->p2 = p2;
__entry->type = type;
),

TP_printk("Flush %d %sPTEs: %llx - %llx",
__entry->count, __entry->type, __entry->p1, __entry->p2)
);

TRACE_EVENT(kvm_book3s_slb_found,
TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
TP_ARGS(gvsid, hvsid),

TP_STRUCT__entry(
__field( unsigned long long, gvsid )
__field( unsigned long long, hvsid )
),

TP_fast_assign(
__entry->gvsid = gvsid;
__entry->hvsid = hvsid;
),

TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
);

TRACE_EVENT(kvm_book3s_slb_fail,
TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
TP_ARGS(sid_map_mask, gvsid),

TP_STRUCT__entry(
__field( unsigned short, sid_map_mask )
__field( unsigned long long, gvsid )
),

TP_fast_assign(
__entry->sid_map_mask = sid_map_mask;
__entry->gvsid = gvsid;
),

TP_printk("%x/%x: %llx", __entry->sid_map_mask,
SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
);

TRACE_EVENT(kvm_book3s_slb_map,
TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
unsigned long long hvsid),
TP_ARGS(sid_map_mask, gvsid, hvsid),

TP_STRUCT__entry(
__field( unsigned short, sid_map_mask )
__field( unsigned long long, guest_vsid )
__field( unsigned long long, host_vsid )
),

TP_fast_assign(
__entry->sid_map_mask = sid_map_mask;
__entry->guest_vsid = gvsid;
__entry->host_vsid = hvsid;
),

TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
__entry->guest_vsid, __entry->host_vsid)
);

TRACE_EVENT(kvm_book3s_slbmte,
TP_PROTO(u64 slb_vsid, u64 slb_esid),
TP_ARGS(slb_vsid, slb_esid),

TP_STRUCT__entry(
__field( u64, slb_vsid )
__field( u64, slb_esid )
),

TP_fast_assign(
__entry->slb_vsid = slb_vsid;
__entry->slb_esid = slb_esid;
),

TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
);

#endif /* CONFIG_PPC_BOOK3S */


/*************************************************************************
* Book3E trace points *
*************************************************************************/

#ifdef CONFIG_BOOKE

TRACE_EVENT(kvm_booke206_stlb_write,
TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),

TP_STRUCT__entry(
__field( __u32, mas0 )
__field( __u32, mas8 )
__field( __u32, mas1 )
__field( __u64, mas2 )
__field( __u64, mas7_3 )
),

TP_fast_assign(
__entry->mas0 = mas0;
__entry->mas8 = mas8;
__entry->mas1 = mas1;
__entry->mas2 = mas2;
__entry->mas7_3 = mas7_3;
),

TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
__entry->mas0, __entry->mas8, __entry->mas1,
__entry->mas2, __entry->mas7_3)
);

TRACE_EVENT(kvm_booke206_gtlb_write,
TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
TP_ARGS(mas0, mas1, mas2, mas7_3),

TP_STRUCT__entry(
__field( __u32, mas0 )
__field( __u32, mas1 )
__field( __u64, mas2 )
__field( __u64, mas7_3 )
),

TP_fast_assign(
__entry->mas0 = mas0;
__entry->mas1 = mas1;
__entry->mas2 = mas2;
__entry->mas7_3 = mas7_3;
),

TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
__entry->mas0, __entry->mas1,
__entry->mas2, __entry->mas7_3)
);
|
||||
TRACE_EVENT(kvm_booke206_ref_release,
|
||||
TP_PROTO(__u64 pfn, __u32 flags),
|
||||
TP_ARGS(pfn, flags),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field( __u64, pfn )
|
||||
__field( __u32, flags )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->pfn = pfn;
|
||||
__entry->flags = flags;
|
||||
),
|
||||
|
||||
TP_printk("pfn=%llx flags=%x",
|
||||
__entry->pfn, __entry->flags)
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_booke_queue_irqprio,
|
||||
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
|
||||
TP_ARGS(vcpu, priority),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field( __u32, cpu_nr )
|
||||
__field( __u32, priority )
|
||||
__field( unsigned long, pending )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->cpu_nr = vcpu->vcpu_id;
|
||||
__entry->priority = priority;
|
||||
__entry->pending = vcpu->arch.pending_exceptions;
|
||||
),
|
||||
|
||||
TP_printk("vcpu=%x prio=%x pending=%lx",
|
||||
__entry->cpu_nr, __entry->priority, __entry->pending)
|
||||
);
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _TRACE_KVM_H */
|
||||
|
||||
/* This part must be outside protection */
#include <trace/define_trace.h>
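Each TRACE_EVENT() above expands into a trace_<name>() helper that call sites invoke. As a minimal sketch (the surrounding function is hypothetical, not code from this commit), the SLB emulation path would fire the slbmte event like this:

/* Hedged illustration: trace_kvm_book3s_slbmte() is generated from the
 * TRACE_EVENT(kvm_book3s_slbmte, ...) definition above; the function name
 * and body here are assumptions for the example. */
#include "trace.h"

static void example_emulate_slbmte(u64 rs, u64 rb)
{
        /* ... update the shadow SLB from the guest's rs/rb operands ... */
        trace_kvm_book3s_slbmte(rs, rb);        /* no-op unless the event is enabled */
}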

@@ -0,0 +1,177 @@
#if !defined(_TRACE_KVM_BOOKE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_BOOKE_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_booke
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_booke

#define kvm_trace_symbol_exit \
        {0, "CRITICAL"}, \
        {1, "MACHINE_CHECK"}, \
        {2, "DATA_STORAGE"}, \
        {3, "INST_STORAGE"}, \
        {4, "EXTERNAL"}, \
        {5, "ALIGNMENT"}, \
        {6, "PROGRAM"}, \
        {7, "FP_UNAVAIL"}, \
        {8, "SYSCALL"}, \
        {9, "AP_UNAVAIL"}, \
        {10, "DECREMENTER"}, \
        {11, "FIT"}, \
        {12, "WATCHDOG"}, \
        {13, "DTLB_MISS"}, \
        {14, "ITLB_MISS"}, \
        {15, "DEBUG"}, \
        {32, "SPE_UNAVAIL"}, \
        {33, "SPE_FP_DATA"}, \
        {34, "SPE_FP_ROUND"}, \
        {35, "PERFORMANCE_MONITOR"}, \
        {36, "DOORBELL"}, \
        {37, "DOORBELL_CRITICAL"}, \
        {38, "GUEST_DBELL"}, \
        {39, "GUEST_DBELL_CRIT"}, \
        {40, "HV_SYSCALL"}, \
        {41, "HV_PRIV"}

TRACE_EVENT(kvm_exit,
        TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
        TP_ARGS(exit_nr, vcpu),

        TP_STRUCT__entry(
                __field( unsigned int, exit_nr )
                __field( unsigned long, pc )
                __field( unsigned long, msr )
                __field( unsigned long, dar )
                __field( unsigned long, last_inst )
        ),

        TP_fast_assign(
                __entry->exit_nr = exit_nr;
                __entry->pc = kvmppc_get_pc(vcpu);
                __entry->dar = kvmppc_get_fault_dar(vcpu);
                __entry->msr = vcpu->arch.shared->msr;
                __entry->last_inst = vcpu->arch.last_inst;
        ),

        TP_printk("exit=%s"
                " | pc=0x%lx"
                " | msr=0x%lx"
                " | dar=0x%lx"
                " | last_inst=0x%lx"
                ,
                __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
                __entry->pc,
                __entry->msr,
                __entry->dar,
                __entry->last_inst
                )
);

TRACE_EVENT(kvm_unmap_hva,
        TP_PROTO(unsigned long hva),
        TP_ARGS(hva),

        TP_STRUCT__entry(
                __field( unsigned long, hva )
        ),

        TP_fast_assign(
                __entry->hva = hva;
        ),

        TP_printk("unmap hva 0x%lx\n", __entry->hva)
);

TRACE_EVENT(kvm_booke206_stlb_write,
        TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
        TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),

        TP_STRUCT__entry(
                __field( __u32, mas0 )
                __field( __u32, mas8 )
                __field( __u32, mas1 )
                __field( __u64, mas2 )
                __field( __u64, mas7_3 )
        ),

        TP_fast_assign(
                __entry->mas0 = mas0;
                __entry->mas8 = mas8;
                __entry->mas1 = mas1;
                __entry->mas2 = mas2;
                __entry->mas7_3 = mas7_3;
        ),

        TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
                  __entry->mas0, __entry->mas8, __entry->mas1,
                  __entry->mas2, __entry->mas7_3)
);

TRACE_EVENT(kvm_booke206_gtlb_write,
        TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
        TP_ARGS(mas0, mas1, mas2, mas7_3),

        TP_STRUCT__entry(
                __field( __u32, mas0 )
                __field( __u32, mas1 )
                __field( __u64, mas2 )
                __field( __u64, mas7_3 )
        ),

        TP_fast_assign(
                __entry->mas0 = mas0;
                __entry->mas1 = mas1;
                __entry->mas2 = mas2;
                __entry->mas7_3 = mas7_3;
        ),

        TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
                  __entry->mas0, __entry->mas1,
                  __entry->mas2, __entry->mas7_3)
);

TRACE_EVENT(kvm_booke206_ref_release,
        TP_PROTO(__u64 pfn, __u32 flags),
        TP_ARGS(pfn, flags),

        TP_STRUCT__entry(
                __field( __u64, pfn )
                __field( __u32, flags )
        ),

        TP_fast_assign(
                __entry->pfn = pfn;
                __entry->flags = flags;
        ),

        TP_printk("pfn=%llx flags=%x",
                  __entry->pfn, __entry->flags)
);

TRACE_EVENT(kvm_booke_queue_irqprio,
        TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
        TP_ARGS(vcpu, priority),

        TP_STRUCT__entry(
                __field( __u32, cpu_nr )
                __field( __u32, priority )
                __field( unsigned long, pending )
        ),

        TP_fast_assign(
                __entry->cpu_nr = vcpu->vcpu_id;
                __entry->priority = priority;
                __entry->pending = vcpu->arch.pending_exceptions;
        ),

        TP_printk("vcpu=%x prio=%x pending=%lx",
                  __entry->cpu_nr, __entry->priority, __entry->pending)
);

#endif

/* This part must be outside protection */
#include <trace/define_trace.h>
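Because this header sets TRACE_SYSTEM to kvm_booke, its events surface under events/kvm_booke/ in tracefs. A minimal userspace sketch for enabling the exit tracepoint (assuming tracefs is mounted at the usual debugfs location; the function name is illustrative):

#include <stdio.h>

/* Returns 0 on success, -1 if the tracefs file cannot be opened. */
int enable_booke_exit_tracing(void)
{
        FILE *f = fopen("/sys/kernel/debug/tracing/events/kvm_booke/kvm_exit/enable", "w");

        if (!f)
                return -1;
        fputc('1', f);          /* writing 1 enables the event */
        fclose(f);
        return 0;
}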

@@ -0,0 +1,297 @@
#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_PR_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr

#define kvm_trace_symbol_exit \
        {0x100, "SYSTEM_RESET"}, \
        {0x200, "MACHINE_CHECK"}, \
        {0x300, "DATA_STORAGE"}, \
        {0x380, "DATA_SEGMENT"}, \
        {0x400, "INST_STORAGE"}, \
        {0x480, "INST_SEGMENT"}, \
        {0x500, "EXTERNAL"}, \
        {0x501, "EXTERNAL_LEVEL"}, \
        {0x502, "EXTERNAL_HV"}, \
        {0x600, "ALIGNMENT"}, \
        {0x700, "PROGRAM"}, \
        {0x800, "FP_UNAVAIL"}, \
        {0x900, "DECREMENTER"}, \
        {0x980, "HV_DECREMENTER"}, \
        {0xc00, "SYSCALL"}, \
        {0xd00, "TRACE"}, \
        {0xe00, "H_DATA_STORAGE"}, \
        {0xe20, "H_INST_STORAGE"}, \
        {0xe40, "H_EMUL_ASSIST"}, \
        {0xf00, "PERFMON"}, \
        {0xf20, "ALTIVEC"}, \
        {0xf40, "VSX"}

TRACE_EVENT(kvm_book3s_reenter,
        TP_PROTO(int r, struct kvm_vcpu *vcpu),
        TP_ARGS(r, vcpu),

        TP_STRUCT__entry(
                __field( unsigned int, r )
                __field( unsigned long, pc )
        ),

        TP_fast_assign(
                __entry->r = r;
                __entry->pc = kvmppc_get_pc(vcpu);
        ),

        TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
);

#ifdef CONFIG_PPC_BOOK3S_64

TRACE_EVENT(kvm_book3s_64_mmu_map,
        TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
                 struct kvmppc_pte *orig_pte),
        TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),

        TP_STRUCT__entry(
                __field( unsigned char, flag_w )
                __field( unsigned char, flag_x )
                __field( unsigned long, eaddr )
                __field( unsigned long, hpteg )
                __field( unsigned long, va )
                __field( unsigned long long, vpage )
                __field( unsigned long, hpaddr )
        ),

        TP_fast_assign(
                __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
                __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
                __entry->eaddr = orig_pte->eaddr;
                __entry->hpteg = hpteg;
                __entry->va = va;
                __entry->vpage = orig_pte->vpage;
                __entry->hpaddr = hpaddr;
        ),

        TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
                  __entry->flag_w, __entry->flag_x, __entry->eaddr,
                  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
);

#endif /* CONFIG_PPC_BOOK3S_64 */

TRACE_EVENT(kvm_book3s_mmu_map,
        TP_PROTO(struct hpte_cache *pte),
        TP_ARGS(pte),

        TP_STRUCT__entry(
                __field( u64, host_vpn )
                __field( u64, pfn )
                __field( ulong, eaddr )
                __field( u64, vpage )
                __field( ulong, raddr )
                __field( int, flags )
        ),

        TP_fast_assign(
                __entry->host_vpn = pte->host_vpn;
                __entry->pfn = pte->pfn;
                __entry->eaddr = pte->pte.eaddr;
                __entry->vpage = pte->pte.vpage;
                __entry->raddr = pte->pte.raddr;
                __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
                                 (pte->pte.may_write ? 0x2 : 0) |
                                 (pte->pte.may_execute ? 0x1 : 0);
        ),

        TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
                  __entry->host_vpn, __entry->pfn, __entry->eaddr,
                  __entry->vpage, __entry->raddr, __entry->flags)
);

TRACE_EVENT(kvm_book3s_mmu_invalidate,
        TP_PROTO(struct hpte_cache *pte),
        TP_ARGS(pte),

        TP_STRUCT__entry(
                __field( u64, host_vpn )
                __field( u64, pfn )
                __field( ulong, eaddr )
                __field( u64, vpage )
                __field( ulong, raddr )
                __field( int, flags )
        ),

        TP_fast_assign(
                __entry->host_vpn = pte->host_vpn;
                __entry->pfn = pte->pfn;
                __entry->eaddr = pte->pte.eaddr;
                __entry->vpage = pte->pte.vpage;
                __entry->raddr = pte->pte.raddr;
                __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
                                 (pte->pte.may_write ? 0x2 : 0) |
                                 (pte->pte.may_execute ? 0x1 : 0);
        ),

        TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
                  __entry->host_vpn, __entry->pfn, __entry->eaddr,
                  __entry->vpage, __entry->raddr, __entry->flags)
);

TRACE_EVENT(kvm_book3s_mmu_flush,
        TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
                 unsigned long long p2),
        TP_ARGS(type, vcpu, p1, p2),

        TP_STRUCT__entry(
                __field( int, count )
                __field( unsigned long long, p1 )
                __field( unsigned long long, p2 )
                __field( const char *, type )
        ),

        TP_fast_assign(
                __entry->count = to_book3s(vcpu)->hpte_cache_count;
                __entry->p1 = p1;
                __entry->p2 = p2;
                __entry->type = type;
        ),

        TP_printk("Flush %d %sPTEs: %llx - %llx",
                  __entry->count, __entry->type, __entry->p1, __entry->p2)
);

TRACE_EVENT(kvm_book3s_slb_found,
        TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
        TP_ARGS(gvsid, hvsid),

        TP_STRUCT__entry(
                __field( unsigned long long, gvsid )
                __field( unsigned long long, hvsid )
        ),

        TP_fast_assign(
                __entry->gvsid = gvsid;
                __entry->hvsid = hvsid;
        ),

        TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
);

TRACE_EVENT(kvm_book3s_slb_fail,
        TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
        TP_ARGS(sid_map_mask, gvsid),

        TP_STRUCT__entry(
                __field( unsigned short, sid_map_mask )
                __field( unsigned long long, gvsid )
        ),

        TP_fast_assign(
                __entry->sid_map_mask = sid_map_mask;
                __entry->gvsid = gvsid;
        ),

        TP_printk("%x/%x: %llx", __entry->sid_map_mask,
                  SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
);

TRACE_EVENT(kvm_book3s_slb_map,
        TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
                 unsigned long long hvsid),
        TP_ARGS(sid_map_mask, gvsid, hvsid),

        TP_STRUCT__entry(
                __field( unsigned short, sid_map_mask )
                __field( unsigned long long, guest_vsid )
                __field( unsigned long long, host_vsid )
        ),

        TP_fast_assign(
                __entry->sid_map_mask = sid_map_mask;
                __entry->guest_vsid = gvsid;
                __entry->host_vsid = hvsid;
        ),

        TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
                  __entry->guest_vsid, __entry->host_vsid)
);

TRACE_EVENT(kvm_book3s_slbmte,
        TP_PROTO(u64 slb_vsid, u64 slb_esid),
        TP_ARGS(slb_vsid, slb_esid),

        TP_STRUCT__entry(
                __field( u64, slb_vsid )
                __field( u64, slb_esid )
        ),

        TP_fast_assign(
                __entry->slb_vsid = slb_vsid;
                __entry->slb_esid = slb_esid;
        ),

        TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
);

TRACE_EVENT(kvm_exit,
        TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
        TP_ARGS(exit_nr, vcpu),

        TP_STRUCT__entry(
                __field( unsigned int, exit_nr )
                __field( unsigned long, pc )
                __field( unsigned long, msr )
                __field( unsigned long, dar )
                __field( unsigned long, srr1 )
                __field( unsigned long, last_inst )
        ),

        TP_fast_assign(
                __entry->exit_nr = exit_nr;
                __entry->pc = kvmppc_get_pc(vcpu);
                __entry->dar = kvmppc_get_fault_dar(vcpu);
                __entry->msr = vcpu->arch.shared->msr;
                __entry->srr1 = vcpu->arch.shadow_srr1;
                __entry->last_inst = vcpu->arch.last_inst;
        ),

        TP_printk("exit=%s"
                " | pc=0x%lx"
                " | msr=0x%lx"
                " | dar=0x%lx"
                " | srr1=0x%lx"
                " | last_inst=0x%lx"
                ,
                __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
                __entry->pc,
                __entry->msr,
                __entry->dar,
                __entry->srr1,
                __entry->last_inst
                )
);

TRACE_EVENT(kvm_unmap_hva,
        TP_PROTO(unsigned long hva),
        TP_ARGS(hva),

        TP_STRUCT__entry(
                __field( unsigned long, hva )
        ),

        TP_fast_assign(
                __entry->hva = hva;
        ),

        TP_printk("unmap hva 0x%lx\n", __entry->hva)
);

#endif /* _TRACE_KVM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
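The flags value logged by the mmu_map and mmu_invalidate events above encodes permissions as r=0x4, w=0x2, x=0x1, so a trailing "[7]" in a trace line reads as rwx and "[5]" as r-x. A small illustrative helper (not part of this commit) mirroring that computation:

#include <stdbool.h>

/* Illustrative only: same bit layout as the TP_fast_assign blocks above. */
static inline int hpte_trace_flags(bool may_read, bool may_write,
                                   bool may_execute)
{
        return (may_read ? 0x4 : 0) |
               (may_write ? 0x2 : 0) |
               (may_execute ? 0x1 : 0);
}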

@@ -1089,12 +1089,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

@@ -7091,7 +7091,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
        int i;

@@ -7112,7 +7112,8 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
        }
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        int i;

@@ -507,9 +507,10 @@ int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
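After this signature change every architecture implements the two hooks with the extra struct kvm * argument. An arch with no per-slot metadata can stub them out as in the sketch below, which mirrors the stubs earlier in this diff:

/* Sketch of a minimal arch implementation under the new signatures. */
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}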

@@ -518,6 +518,10 @@ struct kvm_ppc_smmu_info {
/* machine type bits, to be used as argument to KVM_CREATE_VM */
#define KVM_VM_S390_UCONTROL    1

/* on ppc, 0 indicates default, 1 should force HV and 2 PR */
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2

#define KVM_S390_SIE_PAGE_OFFSET 1

/*
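These bits are passed as the argument to KVM_CREATE_VM. A hedged userspace sketch (error handling trimmed, function name illustrative) that forces the PR backend:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns a VM fd forced onto the PR backend, or -1 on error. */
int create_pr_vm(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR);

        if (kvm_fd < 0)
                return -1;
        /* 0 keeps the default; KVM_VM_PPC_HV / KVM_VM_PPC_PR force a backend */
        return ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
}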

@@ -187,6 +187,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
                ++kvm->stat.remote_tlb_flush;
        cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);

void kvm_reload_remote_mmus(struct kvm *kvm)
{

@@ -541,13 +542,13 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                kvm_destroy_dirty_bitmap(free);

        kvm_arch_free_memslot(free, dont);
        kvm_arch_free_memslot(kvm, free, dont);

        free->npages = 0;
}
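A sketch of the updated call chain, illustrative only (kvm_free_physmem_slot() is internal to kvm_main.c and the wrapper function here is hypothetical): the VM pointer now flows from the generic teardown loop down into the arch hook.

/* Each slot free now carries kvm down to kvm_arch_free_memslot(kvm, ...). */
static void example_drop_all_slots(struct kvm *kvm, struct kvm_memslots *slots)
{
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                kvm_free_physmem_slot(kvm, memslot, NULL);
}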

@@ -558,7 +559,7 @@ void kvm_free_physmem(struct kvm *kvm)
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                kvm_free_physmem_slot(memslot, NULL);
                kvm_free_physmem_slot(kvm, memslot, NULL);

        kfree(kvm->memslots);
}

@@ -822,7 +823,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
        if (change == KVM_MR_CREATE) {
                new.userspace_addr = mem->userspace_addr;

                if (kvm_arch_create_memslot(&new, npages))
                if (kvm_arch_create_memslot(kvm, &new, npages))
                        goto out_free;
        }

@@ -883,7 +884,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
        kvm_arch_commit_memory_region(kvm, mem, &old, change);

        kvm_free_physmem_slot(&old, &new);
        kvm_free_physmem_slot(kvm, &old, &new);
        kfree(old_memslots);

        /*

@@ -905,7 +906,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
out_slots:
        kfree(slots);
out_free:
        kvm_free_physmem_slot(&new, &old);
        kvm_free_physmem_slot(kvm, &new, &old);
out:
        return r;
}

@@ -964,6 +965,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
out:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

bool kvm_largepages_enabled(void)
{

@@ -1654,6 +1656,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
        memslot = gfn_to_memslot(kvm, gfn);
        mark_page_dirty_in_slot(kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.

@@ -1679,6 +1682,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)

        finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);

#ifndef CONFIG_S390
/*