* 'for-upstream' of https://github.com/agraf/linux-2.6: (28 commits)
  KVM: PPC: booke: Get/set guest EPCR register using ONE_REG interface
  KVM: PPC: bookehv: Add EPCR support in mtspr/mfspr emulation
  KVM: PPC: bookehv: Add guest computation mode for irq delivery
  KVM: PPC: Make EPCR a valid field for booke64 and bookehv
  KVM: PPC: booke: Extend MAS2 EPN mask for 64-bit
  KVM: PPC: e500: Mask MAS2 EPN high 32-bits in 32/64 tlbwe emulation
  KVM: PPC: Mask ea's high 32-bits in 32/64 instr emulation
  KVM: PPC: e500: Add emulation helper for getting instruction ea
  KVM: PPC: bookehv64: Add support for interrupt handling
  KVM: PPC: bookehv: Remove GET_VCPU macro from exception handler
  KVM: PPC: booke: Fix get_tb() compile error on 64-bit
  KVM: PPC: e500: Silence bogus GCC warning in tlb code
  KVM: PPC: Book3S HV: Handle guest-caused machine checks on POWER7 without panicking
  KVM: PPC: Book3S HV: Improve handling of local vs. global TLB invalidations
  MAINTAINERS: Add git tree link for PPC KVM
  KVM: PPC: Book3S PR: MSR_DE doesn't exist on Book 3S
  KVM: PPC: Book3S PR: Fix VSX handling
  KVM: PPC: Book3S PR: Emulate PURR, SPURR and DSCR registers
  KVM: PPC: Book3S HV: Don't give the guest RW access to RO pages
  KVM: PPC: Book3S HV: Report correct HPT entry index when reading HPT
  ...
This commit is contained in:
Marcelo Tosatti 2012-12-09 18:44:10 -02:00
Parents: 8f536b7697 352df1deb2
Commit: d2ff4fc557
34 changed files: 1279 additions and 229 deletions


@@ -1773,6 +1773,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_VPA_ADDR | 64
   PPC   | KVM_REG_PPC_VPA_SLB  | 128
   PPC   | KVM_REG_PPC_VPA_DTL  | 128
+  PPC   | KVM_REG_PPC_EPCR     | 32

 4.69 KVM_GET_ONE_REG

@@ -2071,6 +2072,60 @@ KVM_S390_INT_EXTERNAL_CALL (vcpu) - sigp external call; source cpu in parm

 Note that the vcpu ioctl is asynchronous to vcpu execution.

+4.78 KVM_PPC_GET_HTAB_FD
+
+Capability: KVM_CAP_PPC_HTAB_FD
+Architectures: powerpc
+Type: vm ioctl
+Parameters: Pointer to struct kvm_get_htab_fd (in)
+Returns: file descriptor number (>= 0) on success, -1 on error
+
+This returns a file descriptor that can be used either to read out the
+entries in the guest's hashed page table (HPT), or to write entries to
+initialize the HPT.  The returned fd can only be written to if the
+KVM_GET_HTAB_WRITE bit is set in the flags field of the argument, and
+can only be read if that bit is clear.  The argument struct looks like
+this:
+
+/* For KVM_PPC_GET_HTAB_FD */
+struct kvm_get_htab_fd {
+	__u64	flags;
+	__u64	start_index;
+	__u64	reserved[2];
+};
+
+/* Values for kvm_get_htab_fd.flags */
+#define KVM_GET_HTAB_BOLTED_ONLY	((__u64)0x1)
+#define KVM_GET_HTAB_WRITE		((__u64)0x2)
+
+The `start_index' field gives the index in the HPT of the entry at
+which to start reading.  It is ignored when writing.
+
+Reads on the fd will initially supply information about all
+"interesting" HPT entries.  Interesting entries are those with the
+bolted bit set, if the KVM_GET_HTAB_BOLTED_ONLY bit is set, otherwise
+all entries.  When the end of the HPT is reached, the read() will
+return.  If read() is called again on the fd, it will start again from
+the beginning of the HPT, but will only return HPT entries that have
+changed since they were last read.
+
+Data read or written is structured as a header (8 bytes) followed by a
+series of valid HPT entries (16 bytes) each.  The header indicates how
+many valid HPT entries there are and how many invalid entries follow
+the valid entries.  The invalid entries are not represented explicitly
+in the stream.  The header format is:
+
+struct kvm_get_htab_header {
+	__u32	index;
+	__u16	n_valid;
+	__u16	n_invalid;
+};
+
+Writes to the fd create HPT entries starting at the index given in the
+header; first `n_valid' valid entries with contents from the data
+written, then `n_invalid' invalid entries, invalidating any previously
+valid entries found.
+
 5. The kvm_run structure
 ------------------------
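For illustration only (not part of the patch): a minimal userspace sketch of
reading the HPT through the interface documented above.  It assumes vmfd is an
already-open KVM VM file descriptor and that the guest is paused; the function
name dump_bolted_hptes and the buffer size are invented for the example, while
the ioctl, flags and record layout are the ones defined by this patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_bolted_hptes(int vmfd)
{
	struct kvm_get_htab_fd ghf = {
		.flags = KVM_GET_HTAB_BOLTED_ONLY, /* read-only: KVM_GET_HTAB_WRITE clear */
		.start_index = 0,
	};
	char buf[65536];
	ssize_t nb;
	int fd;

	fd = ioctl(vmfd, KVM_PPC_GET_HTAB_FD, &ghf);
	if (fd < 0)
		return -1;

	/*
	 * Each read() yields a sequence of records: an 8-byte
	 * kvm_get_htab_header followed by n_valid 16-byte HPTEs.
	 * With the guest paused, the second pass finds no changed
	 * entries, so a zero-length read terminates the loop.
	 */
	while ((nb = read(fd, buf, sizeof(buf))) > 0) {
		char *p = buf;

		while (p + sizeof(struct kvm_get_htab_header) <= buf + nb) {
			struct kvm_get_htab_header hdr;
			unsigned long long vr[2];
			int j;

			memcpy(&hdr, p, sizeof(hdr));
			p += sizeof(hdr);
			for (j = 0; j < hdr.n_valid; ++j) {
				memcpy(vr, p, sizeof(vr));
				p += sizeof(vr);
				printf("HPTE %u+%d: v=%llx r=%llx\n",
				       hdr.index, j, vr[0], vr[1]);
			}
			/* the hdr.n_invalid entries carry no data */
		}
	}

	close(fd);
	return nb < 0 ? -1 : 0;
}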


@@ -4253,6 +4253,7 @@ KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
 M:	Alexander Graf <agraf@suse.de>
 L:	kvm-ppc@vger.kernel.org
 W:	http://kvm.qumranet.com
+T:	git git://github.com/agraf/linux-2.6.git
 S:	Supported
 F:	arch/powerpc/include/asm/kvm*
 F:	arch/powerpc/kvm/


@@ -81,6 +81,8 @@ struct kvmppc_vcpu_book3s {
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
+	u64 purr_offset;
+	u64 spurr_offset;
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 vsid_pool[VSID_POOL_SIZE];
 	u32 vsid_next;
@@ -157,8 +159,12 @@ extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
 extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
 extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 			long pte_index, unsigned long pteh, unsigned long ptel);
-extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-			long pte_index, unsigned long pteh, unsigned long ptel);
+extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
+			long pte_index, unsigned long pteh, unsigned long ptel,
+			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
+extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
+			unsigned long pte_index, unsigned long avpn,
+			unsigned long *hpret);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
 			struct kvm_memory_slot *memslot, unsigned long *map);


@@ -50,6 +50,15 @@ extern int kvm_hpt_order;	/* order of preallocated HPTs */
 #define HPTE_V_HVLOCK	0x40UL
 #define HPTE_V_ABSENT	0x20UL

+/*
+ * We use this bit in the guest_rpte field of the revmap entry
+ * to indicate a modified HPTE.
+ */
+#define HPTE_GR_MODIFIED	(1ul << 62)
+
+/* These bits are reserved in the guest view of the HPTE */
+#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED
+
 static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
 {
 	unsigned long tmp, old;
@@ -237,4 +246,26 @@ static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
 	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
 }

+/*
+ * This works for 4k, 64k and 16M pages on POWER7,
+ * and 4k and 16M pages on PPC970.
+ */
+static inline unsigned long slb_pgsize_encoding(unsigned long psize)
+{
+	unsigned long senc = 0;
+
+	if (psize > 0x1000) {
+		senc = SLB_VSID_L;
+		if (psize == 0x10000)
+			senc |= SLB_VSID_LP_01;
+	}
+	return senc;
+}
+
+static inline int is_vrma_hpte(unsigned long hpte_v)
+{
+	return (hpte_v & ~0xffffffUL) ==
+		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
+}
+
 #endif /* __ASM_KVM_BOOK3S_64_H__ */


@@ -17,6 +17,7 @@
  * there are no exceptions for which we fall through directly to
  * the normal host handler.
  *
+ * 32-bit host
  * Expected inputs (normal exceptions):
  *  SCRATCH0 = saved r10
  *  r10 = thread struct
@@ -33,6 +34,30 @@
  *  *(r8 + GPR9) = saved r9
  *  *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
  *  *(r8 + GPR11) = saved r11
+ *
+ * 64-bit host
+ * Expected inputs (GEN/GDBELL/DBG/MC exception types):
+ *  r10 = saved CR
+ *  r13 = PACA_POINTER
+ *  *(r13 + PACA_EX##type + EX_R10) = saved r10
+ *  *(r13 + PACA_EX##type + EX_R11) = saved r11
+ *  SPRN_SPRG_##type##_SCRATCH = saved r13
+ *
+ * Expected inputs (CRIT exception type):
+ *  r10 = saved CR
+ *  r13 = PACA_POINTER
+ *  *(r13 + PACA_EX##type + EX_R10) = saved r10
+ *  *(r13 + PACA_EX##type + EX_R11) = saved r11
+ *  *(r13 + PACA_EX##type + EX_R13) = saved r13
+ *
+ * Expected inputs (TLB exception type):
+ *  r10 = saved CR
+ *  r13 = PACA_POINTER
+ *  *(r13 + PACA_EX##type + EX_TLB_R10) = saved r10
+ *  *(r13 + PACA_EX##type + EX_TLB_R11) = saved r11
+ *  SPRN_SPRG_GEN_SCRATCH = saved r13
+ *
+ * Only the bolted version of TLB miss exception handlers is supported now.
  */
 .macro DO_KVM intno srr1
 #ifdef CONFIG_KVM_BOOKE_HV


@@ -246,10 +246,12 @@ struct kvm_arch {
 	int using_mmu_notifiers;
 	u32 hpt_order;
 	atomic_t vcpus_running;
+	u32 online_vcores;
 	unsigned long hpt_npte;
 	unsigned long hpt_mask;
+	atomic_t hpte_mod_interest;
 	spinlock_t slot_phys_lock;
-	unsigned short last_vcpu[NR_CPUS];
+	cpumask_t need_tlb_flush;
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	struct kvmppc_linear_info *hpt_li;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
@@ -274,6 +276,7 @@ struct kvmppc_vcore {
 	int nap_count;
 	int napping_threads;
 	u16 pcpu;
+	u16 last_cpu;
 	u8 vcore_state;
 	u8 in_guest;
 	struct list_head runnable_threads;
@@ -403,13 +406,18 @@ struct kvm_vcpu_arch {
 	u32 host_mas4;
 	u32 host_mas6;
 	u32 shadow_epcr;
-	u32 epcr;
 	u32 shadow_msrp;
 	u32 eplc;
 	u32 epsc;
 	u32 oldpir;
 #endif
+
+#if defined(CONFIG_BOOKE)
+#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
+	u32 epcr;
+#endif
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S
 	/* For Gekko paired singles */
 	u32 qpr[32];
@@ -522,7 +530,6 @@ struct kvm_vcpu_arch {
 	u64 dec_jiffies;
 	u64 dec_expires;
 	unsigned long pending_exceptions;
-	u16 last_cpu;
 	u8 ceded;
 	u8 prodded;
 	u32 last_inst;


@@ -164,6 +164,8 @@ extern void kvmppc_bookehv_exit(void);

 extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

+extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.
@@ -293,4 +295,25 @@ static inline void kvmppc_lazy_ee_enable(void)
 #endif
 }

+static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
+{
+	ulong ea;
+	ulong msr_64bit = 0;
+
+	ea = kvmppc_get_gpr(vcpu, rb);
+	if (ra)
+		ea += kvmppc_get_gpr(vcpu, ra);
+
+#if defined(CONFIG_PPC_BOOK3E_64)
+	msr_64bit = MSR_CM;
+#elif defined(CONFIG_PPC_BOOK3S_64)
+	msr_64bit = MSR_SF;
+#endif
+
+	if (!(vcpu->arch.shared->msr & msr_64bit))
+		ea = (uint32_t)ea;
+
+	return ea;
+}
+
 #endif /* __POWERPC_KVM_PPC_H__ */


@@ -59,7 +59,7 @@
 #define MAS1_TSIZE_SHIFT	7
 #define MAS1_TSIZE(x)		(((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)

-#define MAS2_EPN		0xFFFFF000
+#define MAS2_EPN		(~0xFFFUL)
 #define MAS2_X0			0x00000040
 #define MAS2_X1			0x00000020
 #define MAS2_W			0x00000010


@@ -121,6 +121,16 @@ extern char initial_stab[];
 #define PP_RXRX 3	/* Supervisor read,       User read */
 #define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

+/* Fields for tlbiel instruction in architecture 2.06 */
+#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
+#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
+#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
+#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
+#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
+#define TLBIEL_INVAL_SET_SHIFT	12
+
+#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
+
 #ifndef __ASSEMBLY__

 struct hash_pte {


@@ -518,6 +518,7 @@
 #define SRR1_WS_DEEPER		0x00020000 /* Some resources not maintained */
 #define SRR1_WS_DEEP		0x00010000 /* All resources maintained */
 #define SRR1_PROGFPE		0x00100000 /* Floating Point Enabled */
+#define SRR1_PROGILL		0x00080000 /* Illegal instruction */
 #define SRR1_PROGPRIV		0x00040000 /* Privileged instruction */
 #define SRR1_PROGTRAP		0x00020000 /* Trap */
 #define SRR1_PROGADDR		0x00010000 /* SRR0 contains subsequent addr */


@@ -331,6 +331,31 @@ struct kvm_book3e_206_tlb_params {
 	__u32 reserved[8];
 };

+/* For KVM_PPC_GET_HTAB_FD */
+struct kvm_get_htab_fd {
+	__u64	flags;
+	__u64	start_index;
+	__u64	reserved[2];
+};
+
+/* Values for kvm_get_htab_fd.flags */
+#define KVM_GET_HTAB_BOLTED_ONLY	((__u64)0x1)
+#define KVM_GET_HTAB_WRITE		((__u64)0x2)
+
+/*
+ * Data read on the file descriptor is formatted as a series of
+ * records, each consisting of a header followed by a series of
+ * `n_valid' HPTEs (16 bytes each), which are all valid.  Following
+ * those valid HPTEs there are `n_invalid' invalid HPTEs, which
+ * are not represented explicitly in the stream.  The same format
+ * is used for writing.
+ */
+struct kvm_get_htab_header {
+	__u32	index;
+	__u16	n_valid;
+	__u16	n_invalid;
+};
+
 #define KVM_REG_PPC_HIOR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
 #define KVM_REG_PPC_IAC1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2)
 #define KVM_REG_PPC_IAC2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3)
@@ -386,4 +411,6 @@ struct kvm_book3e_206_tlb_params {
 #define KVM_REG_PPC_VPA_SLB	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x83)
 #define KVM_REG_PPC_VPA_DTL	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84)

+#define KVM_REG_PPC_EPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85)
+
 #endif /* __LINUX_KVM_POWERPC_H */
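For illustration only (hypothetical helpers, not part of this header): how the
record layout described in the comment above translates into sizes and indices
when walking the stream; only the kvm_get_htab_header layout comes from the
patch.

/* Given one record header from the HTAB stream: the number of data
 * bytes that follow it (only the n_valid entries are present), and
 * the HPT index at which the next record starts. */
static inline size_t htab_record_bytes(const struct kvm_get_htab_header *hdr)
{
	return (size_t)hdr->n_valid * 16;	/* each valid HPTE is 16 bytes */
}

static inline __u32 htab_next_index(const struct kvm_get_htab_header *hdr)
{
	return hdr->index + hdr->n_valid + hdr->n_invalid;
}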


@@ -441,8 +441,7 @@ int main(void)
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
 	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
 	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
-	DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
-	DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+	DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
 	DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
 	DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
 	DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
@@ -470,7 +469,6 @@ int main(void)
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
-	DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
 	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
 	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));


@@ -20,6 +20,7 @@ config KVM
 	bool
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select HAVE_KVM_EVENTFD

 config KVM_BOOK3S_HANDLER
 	bool


@@ -6,7 +6,8 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

 ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm

-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \
+		eventfd.o)

 CFLAGS_44x_tlb.o := -I.
 CFLAGS_e500_tlb.o := -I.
@@ -72,10 +73,12 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
 	book3s_hv_rmhandlers.o \
 	book3s_hv_rm_mmu.o \
 	book3s_64_vio_hv.o \
+	book3s_hv_ras.o \
 	book3s_hv_builtin.o

 kvm-book3s_64-module-objs := \
 	../../../virt/kvm/kvm_main.o \
+	../../../virt/kvm/eventfd.o \
 	powerpc.o \
 	emulate.o \
 	book3s.o \


@@ -25,6 +25,8 @@
 #include <linux/hugetlb.h>
 #include <linux/vmalloc.h>
 #include <linux/srcu.h>
+#include <linux/anon_inodes.h>
+#include <linux/file.h>

 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
@@ -41,6 +43,11 @@
 /* Power architecture requires HPT is at least 256kB */
 #define PPC_MIN_HPT_ORDER	18

+static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
+				long pte_index, unsigned long pteh,
+				unsigned long ptel, unsigned long *pte_idx_ret);
+static void kvmppc_rmap_reset(struct kvm *kvm);
+
 long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 {
 	unsigned long hpt;
@@ -138,10 +145,11 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
 		/* Set the entire HPT to 0, i.e. invalid HPTEs */
 		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
 		/*
-		 * Set the whole last_vcpu array to an invalid vcpu number.
-		 * This ensures that each vcpu will flush its TLB on next entry.
+		 * Reset all the reverse-mapping chains for all memslots
 		 */
-		memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
+		kvmppc_rmap_reset(kvm);
+		/* Ensure that each vcpu will flush its TLB on next entry. */
+		cpumask_setall(&kvm->arch.need_tlb_flush);
 		*htab_orderp = order;
 		err = 0;
 	} else {
@@ -185,6 +193,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 	unsigned long addr, hash;
 	unsigned long psize;
 	unsigned long hp0, hp1;
+	unsigned long idx_ret;
 	long ret;
 	struct kvm *kvm = vcpu->kvm;
@@ -216,7 +225,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 		hash = (hash << 3) + 7;
 		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
 		hp_r = hp1 | addr;
-		ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
+		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
+						 &idx_ret);
 		if (ret != H_SUCCESS) {
 			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
 			       addr, ret);
@@ -354,15 +364,10 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
 	return err;
 }

-/*
- * We come here on a H_ENTER call from the guest when we are not
- * using mmu notifiers and we don't have the requested page pinned
- * already.
- */
-long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-			long pte_index, unsigned long pteh, unsigned long ptel)
+long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
+				long pte_index, unsigned long pteh,
+				unsigned long ptel, unsigned long *pte_idx_ret)
 {
-	struct kvm *kvm = vcpu->kvm;
 	unsigned long psize, gpa, gfn;
 	struct kvm_memory_slot *memslot;
 	long ret;
@@ -390,8 +395,8 @@ long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
  do_insert:
 	/* Protect linux PTE lookup from page table destruction */
 	rcu_read_lock_sched();	/* this disables preemption too */
-	vcpu->arch.pgdir = current->mm->pgd;
-	ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
+	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
+				current->mm->pgd, false, pte_idx_ret);
 	rcu_read_unlock_sched();
 	if (ret == H_TOO_HARD) {
 		/* this can't happen */
@@ -402,6 +407,19 @@ long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 }

+/*
+ * We come here on a H_ENTER call from the guest when we are not
+ * using mmu notifiers and we don't have the requested page pinned
+ * already.
+ */
+long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+			     long pte_index, unsigned long pteh,
+			     unsigned long ptel)
+{
+	return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
+					  pteh, ptel, &vcpu->arch.gpr[4]);
+}
+
 static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
 							 gva_t eaddr)
 {
@@ -756,6 +774,25 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	goto out_put;
 }

+static void kvmppc_rmap_reset(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int srcu_idx;
+
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+	slots = kvm->memslots;
+	kvm_for_each_memslot(memslot, slots) {
+		/*
+		 * This assumes it is acceptable to lose reference and
+		 * change bits across a reset.
+		 */
+		memset(memslot->arch.rmap, 0,
+		       memslot->npages * sizeof(*memslot->arch.rmap));
+	}
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+}
+
 static int kvm_handle_hva_range(struct kvm *kvm,
 				unsigned long start,
 				unsigned long end,
@@ -1131,6 +1168,348 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
 	put_page(page);
 }

+/*
+ * Functions for reading and writing the hash table via reads and
+ * writes on a file descriptor.
+ *
+ * Reads return the guest view of the hash table, which has to be
+ * pieced together from the real hash table and the guest_rpte
+ * values in the revmap array.
+ *
+ * On writes, each HPTE written is considered in turn, and if it
+ * is valid, it is written to the HPT as if an H_ENTER with the
+ * exact flag set was done.  When the invalid count is non-zero
+ * in the header written to the stream, the kernel will make
+ * sure that that many HPTEs are invalid, and invalidate them
+ * if not.
+ */
+
+struct kvm_htab_ctx {
+	unsigned long	index;
+	unsigned long	flags;
+	struct kvm	*kvm;
+	int		first_pass;
+};
+
+#define HPTE_SIZE	(2 * sizeof(unsigned long))
+
+static long record_hpte(unsigned long flags, unsigned long *hptp,
+			unsigned long *hpte, struct revmap_entry *revp,
+			int want_valid, int first_pass)
+{
+	unsigned long v, r;
+	int ok = 1;
+	int valid, dirty;
+
+	/* Unmodified entries are uninteresting except on the first pass */
+	dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
+	if (!first_pass && !dirty)
+		return 0;
+
+	valid = 0;
+	if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+		valid = 1;
+		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
+		    !(hptp[0] & HPTE_V_BOLTED))
+			valid = 0;
+	}
+	if (valid != want_valid)
+		return 0;
+
+	v = r = 0;
+	if (valid || dirty) {
+		/* lock the HPTE so it's stable and read it */
+		preempt_disable();
+		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
+			cpu_relax();
+		v = hptp[0];
+		if (v & HPTE_V_ABSENT) {
+			v &= ~HPTE_V_ABSENT;
+			v |= HPTE_V_VALID;
+		}
+		/* re-evaluate valid and dirty from synchronized HPTE value */
+		valid = !!(v & HPTE_V_VALID);
+		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
+			valid = 0;
+		r = revp->guest_rpte | (hptp[1] & (HPTE_R_R | HPTE_R_C));
+		dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
+		/* only clear modified if this is the right sort of entry */
+		if (valid == want_valid && dirty) {
+			r &= ~HPTE_GR_MODIFIED;
+			revp->guest_rpte = r;
+		}
+		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+		hptp[0] &= ~HPTE_V_HVLOCK;
+		preempt_enable();
+		if (!(valid == want_valid && (first_pass || dirty)))
+			ok = 0;
+	}
+	hpte[0] = v;
+	hpte[1] = r;
+	return ok;
+}
+
+static ssize_t kvm_htab_read(struct file *file, char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct kvm_htab_ctx *ctx = file->private_data;
+	struct kvm *kvm = ctx->kvm;
+	struct kvm_get_htab_header hdr;
+	unsigned long *hptp;
+	struct revmap_entry *revp;
+	unsigned long i, nb, nw;
+	unsigned long __user *lbuf;
+	struct kvm_get_htab_header __user *hptr;
+	unsigned long flags;
+	int first_pass;
+	unsigned long hpte[2];
+
+	if (!access_ok(VERIFY_WRITE, buf, count))
+		return -EFAULT;
+
+	first_pass = ctx->first_pass;
+	flags = ctx->flags;
+
+	i = ctx->index;
+	hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+	revp = kvm->arch.revmap + i;
+	lbuf = (unsigned long __user *)buf;
+
+	nb = 0;
+	while (nb + sizeof(hdr) + HPTE_SIZE < count) {
+		/* Initialize header */
+		hptr = (struct kvm_get_htab_header __user *)buf;
+		hdr.n_valid = 0;
+		hdr.n_invalid = 0;
+		nw = nb;
+		nb += sizeof(hdr);
+		lbuf = (unsigned long __user *)(buf + sizeof(hdr));
+
+		/* Skip uninteresting entries, i.e. clean on not-first pass */
+		if (!first_pass) {
+			while (i < kvm->arch.hpt_npte &&
+			       !(revp->guest_rpte & HPTE_GR_MODIFIED)) {
+				++i;
+				hptp += 2;
+				++revp;
+			}
+		}
+		hdr.index = i;
+
+		/* Grab a series of valid entries */
+		while (i < kvm->arch.hpt_npte &&
+		       hdr.n_valid < 0xffff &&
+		       nb + HPTE_SIZE < count &&
+		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
+			/* valid entry, write it out */
+			++hdr.n_valid;
+			if (__put_user(hpte[0], lbuf) ||
+			    __put_user(hpte[1], lbuf + 1))
+				return -EFAULT;
+			nb += HPTE_SIZE;
+			lbuf += 2;
+			++i;
+			hptp += 2;
+			++revp;
+		}
+		/* Now skip invalid entries while we can */
+		while (i < kvm->arch.hpt_npte &&
+		       hdr.n_invalid < 0xffff &&
+		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
+			/* found an invalid entry */
+			++hdr.n_invalid;
+			++i;
+			hptp += 2;
+			++revp;
+		}
+
+		if (hdr.n_valid || hdr.n_invalid) {
+			/* write back the header */
+			if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
+				return -EFAULT;
+			nw = nb;
+			buf = (char __user *)lbuf;
+		} else {
+			nb = nw;
+		}
+
+		/* Check if we've wrapped around the hash table */
+		if (i >= kvm->arch.hpt_npte) {
+			i = 0;
+			ctx->first_pass = 0;
+			break;
+		}
+	}
+
+	ctx->index = i;
+
+	return nb;
+}
+
+static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	struct kvm_htab_ctx *ctx = file->private_data;
+	struct kvm *kvm = ctx->kvm;
+	struct kvm_get_htab_header hdr;
+	unsigned long i, j;
+	unsigned long v, r;
+	unsigned long __user *lbuf;
+	unsigned long *hptp;
+	unsigned long tmp[2];
+	ssize_t nb;
+	long int err, ret;
+	int rma_setup;
+
+	if (!access_ok(VERIFY_READ, buf, count))
+		return -EFAULT;
+
+	/* lock out vcpus from running while we're doing this */
+	mutex_lock(&kvm->lock);
+	rma_setup = kvm->arch.rma_setup_done;
+	if (rma_setup) {
+		kvm->arch.rma_setup_done = 0;	/* temporarily */
+		/* order rma_setup_done vs. vcpus_running */
+		smp_mb();
+		if (atomic_read(&kvm->arch.vcpus_running)) {
+			kvm->arch.rma_setup_done = 1;
+			mutex_unlock(&kvm->lock);
+			return -EBUSY;
+		}
+	}
+
+	err = 0;
+	for (nb = 0; nb + sizeof(hdr) <= count; ) {
+		err = -EFAULT;
+		if (__copy_from_user(&hdr, buf, sizeof(hdr)))
+			break;
+
+		err = 0;
+		if (nb + hdr.n_valid * HPTE_SIZE > count)
+			break;
+
+		nb += sizeof(hdr);
+		buf += sizeof(hdr);
+
+		err = -EINVAL;
+		i = hdr.index;
+		if (i >= kvm->arch.hpt_npte ||
+		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
+			break;
+
+		hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+		lbuf = (unsigned long __user *)buf;
+		for (j = 0; j < hdr.n_valid; ++j) {
+			err = -EFAULT;
+			if (__get_user(v, lbuf) || __get_user(r, lbuf + 1))
+				goto out;
+			err = -EINVAL;
+			if (!(v & HPTE_V_VALID))
+				goto out;
+			lbuf += 2;
+			nb += HPTE_SIZE;
+
+			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
+				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
+			err = -EIO;
+			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
+							 tmp);
+			if (ret != H_SUCCESS) {
+				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
+				       "r=%lx\n", ret, i, v, r);
+				goto out;
+			}
+			if (!rma_setup && is_vrma_hpte(v)) {
+				unsigned long psize = hpte_page_size(v, r);
+				unsigned long senc = slb_pgsize_encoding(psize);
+				unsigned long lpcr;
+
+				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
+					(VRMA_VSID << SLB_VSID_SHIFT_1T);
+				lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
+				lpcr |= senc << (LPCR_VRMASD_SH - 4);
+				kvm->arch.lpcr = lpcr;
+				rma_setup = 1;
+			}
+			++i;
+			hptp += 2;
+		}
+
+		for (j = 0; j < hdr.n_invalid; ++j) {
+			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
+				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
+			++i;
+			hptp += 2;
+		}
+		err = 0;
+	}
+
+ out:
+	/* Order HPTE updates vs. rma_setup_done */
+	smp_wmb();
+	kvm->arch.rma_setup_done = rma_setup;
+	mutex_unlock(&kvm->lock);
+
+	if (err)
+		return err;
+	return nb;
+}
+
+static int kvm_htab_release(struct inode *inode, struct file *filp)
+{
+	struct kvm_htab_ctx *ctx = filp->private_data;
+
+	filp->private_data = NULL;
+	if (!(ctx->flags & KVM_GET_HTAB_WRITE))
+		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
+	kvm_put_kvm(ctx->kvm);
+	kfree(ctx);
+	return 0;
+}
+
+static struct file_operations kvm_htab_fops = {
+	.read		= kvm_htab_read,
+	.write		= kvm_htab_write,
+	.llseek		= default_llseek,
+	.release	= kvm_htab_release,
+};
+
+int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
+{
+	int ret;
+	struct kvm_htab_ctx *ctx;
+	int rwflag;
+
+	/* reject flags we don't recognize */
+	if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
+		return -EINVAL;
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+	kvm_get_kvm(kvm);
+	ctx->kvm = kvm;
+	ctx->index = ghf->start_index;
+	ctx->flags = ghf->flags;
+	ctx->first_pass = 1;
+
+	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
+	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag);
+	if (ret < 0) {
+		kvm_put_kvm(kvm);
+		return ret;
+	}
+
+	if (rwflag == O_RDONLY) {
+		mutex_lock(&kvm->slots_lock);
+		atomic_inc(&kvm->arch.hpte_mod_interest);
+		/* make sure kvmppc_do_h_enter etc. see the increment */
+		synchronize_srcu_expedited(&kvm->srcu);
+		mutex_unlock(&kvm->slots_lock);
+	}
+
+	return ret;
+}
+
 void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;


@@ -22,6 +22,7 @@
 #include <asm/kvm_book3s.h>
 #include <asm/reg.h>
 #include <asm/switch_to.h>
+#include <asm/time.h>

 #define OP_19_XOP_RFID		18
 #define OP_19_XOP_RFI		50
@@ -395,6 +396,12 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 		    (mfmsr() & MSR_HV))
 			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
 		break;
+	case SPRN_PURR:
+		to_book3s(vcpu)->purr_offset = spr_val - get_tb();
+		break;
+	case SPRN_SPURR:
+		to_book3s(vcpu)->spurr_offset = spr_val - get_tb();
+		break;
 	case SPRN_GQR0:
 	case SPRN_GQR1:
 	case SPRN_GQR2:
@@ -412,6 +419,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 	case SPRN_CTRLF:
 	case SPRN_CTRLT:
 	case SPRN_L2CR:
+	case SPRN_DSCR:
 	case SPRN_MMCR0_GEKKO:
 	case SPRN_MMCR1_GEKKO:
 	case SPRN_PMC1_GEKKO:
@@ -483,9 +491,15 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 		*spr_val = to_book3s(vcpu)->hid[5];
 		break;
 	case SPRN_CFAR:
-	case SPRN_PURR:
+	case SPRN_DSCR:
 		*spr_val = 0;
 		break;
+	case SPRN_PURR:
+		*spr_val = get_tb() + to_book3s(vcpu)->purr_offset;
+		break;
+	case SPRN_SPURR:
+		*spr_val = get_tb() + to_book3s(vcpu)->spurr_offset;
+		break;
 	case SPRN_GQR0:
 	case SPRN_GQR1:
 	case SPRN_GQR2:


@@ -28,8 +28,5 @@ EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
 #endif
-#ifdef CONFIG_VSX
-EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
-#endif
 #endif


@@ -545,6 +545,17 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_PERFMON:
 		r = RESUME_GUEST;
 		break;
+	case BOOK3S_INTERRUPT_MACHINE_CHECK:
+		/*
+		 * Deliver a machine check interrupt to the guest.
+		 * We have to do this, even if the host has handled the
+		 * machine check, because machine checks use SRR0/1 and
+		 * the interrupt might have trashed guest state in them.
+		 */
+		kvmppc_book3s_queue_irqprio(vcpu,
+					    BOOK3S_INTERRUPT_MACHINE_CHECK);
+		r = RESUME_GUEST;
+		break;
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
 		ulong flags;
@@ -853,7 +864,6 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 		goto free_vcpu;

 	vcpu->arch.shared = &vcpu->arch.shregs;
-	vcpu->arch.last_cpu = -1;
 	vcpu->arch.mmcr[0] = MMCR0_FC;
 	vcpu->arch.ctrl = CTRL_RUNLATCH;
 	/* default to host PVR, since we can't spoof it */
@@ -880,6 +890,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 			vcore->preempt_tb = TB_NIL;
 		}
 		kvm->arch.vcores[core] = vcore;
+		kvm->arch.online_vcores++;
 	}
 	mutex_unlock(&kvm->lock);
@@ -1563,18 +1574,6 @@ out:
 	return r;
 }

-static unsigned long slb_pgsize_encoding(unsigned long psize)
-{
-	unsigned long senc = 0;
-
-	if (psize > 0x1000) {
-		senc = SLB_VSID_L;
-		if (psize == 0x10000)
-			senc |= SLB_VSID_LP_01;
-	}
-	return senc;
-}
-
 static void unpin_slot(struct kvm_memory_slot *memslot)
 {
 	unsigned long *physp;
@@ -1814,6 +1813,13 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 		return -ENOMEM;
 	kvm->arch.lpid = lpid;

+	/*
+	 * Since we don't flush the TLB when tearing down a VM,
+	 * and this lpid might have previously been used,
+	 * make sure we flush on each core before running the new VM.
+	 */
+	cpumask_setall(&kvm->arch.need_tlb_flush);
+
 	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
 	kvm->arch.rma = NULL;


@@ -0,0 +1,144 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/kernel.h>
+#include <asm/opal.h>
+
+/* SRR1 bits for machine check on POWER7 */
+#define SRR1_MC_LDSTERR		(1ul << (63-42))
+#define SRR1_MC_IFETCH_SH	(63-45)
+#define SRR1_MC_IFETCH_MASK	0x7
+#define SRR1_MC_IFETCH_SLBPAR		2	/* SLB parity error */
+#define SRR1_MC_IFETCH_SLBMULTI		3	/* SLB multi-hit */
+#define SRR1_MC_IFETCH_SLBPARMULTI	4	/* SLB parity + multi-hit */
+#define SRR1_MC_IFETCH_TLBMULTI		5	/* I-TLB multi-hit */
+
+/* DSISR bits for machine check on POWER7 */
+#define DSISR_MC_DERAT_MULTI	0x800		/* D-ERAT multi-hit */
+#define DSISR_MC_TLB_MULTI	0x400		/* D-TLB multi-hit */
+#define DSISR_MC_SLB_PARITY	0x100		/* SLB parity error */
+#define DSISR_MC_SLB_MULTI	0x080		/* SLB multi-hit */
+#define DSISR_MC_SLB_PARMULTI	0x040		/* SLB parity + multi-hit */
+
+/* POWER7 SLB flush and reload */
+static void reload_slb(struct kvm_vcpu *vcpu)
+{
+	struct slb_shadow *slb;
+	unsigned long i, n;
+
+	/* First clear out SLB */
+	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
+
+	/* Do they have an SLB shadow buffer registered? */
+	slb = vcpu->arch.slb_shadow.pinned_addr;
+	if (!slb)
+		return;
+
+	/* Sanity check */
+	n = min_t(u32, slb->persistent, SLB_MIN_SIZE);
+	if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
+		return;
+
+	/* Load up the SLB from that */
+	for (i = 0; i < n; ++i) {
+		unsigned long rb = slb->save_area[i].esid;
+		unsigned long rs = slb->save_area[i].vsid;
+
+		rb = (rb & ~0xFFFul) | i;	/* insert entry number */
+		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
+	}
+}
+
+/* POWER7 TLB flush */
+static void flush_tlb_power7(struct kvm_vcpu *vcpu)
+{
+	unsigned long i, rb;
+
+	rb = TLBIEL_INVAL_SET_LPID;
+	for (i = 0; i < POWER7_TLB_SETS; ++i) {
+		asm volatile("tlbiel %0" : : "r" (rb));
+		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
+	}
+}
+
+/*
+ * On POWER7, see if we can handle a machine check that occurred inside
+ * the guest in real mode, without switching to the host partition.
+ *
+ * Returns: 0 => exit guest, 1 => deliver machine check to guest
+ */
+static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
+{
+	unsigned long srr1 = vcpu->arch.shregs.msr;
+	struct opal_machine_check_event *opal_evt;
+	long handled = 1;
+
+	if (srr1 & SRR1_MC_LDSTERR) {
+		/* error on load/store */
+		unsigned long dsisr = vcpu->arch.shregs.dsisr;
+
+		if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
+			     DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
+			/* flush and reload SLB; flushes D-ERAT too */
+			reload_slb(vcpu);
+			dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
+				   DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
+		}
+		if (dsisr & DSISR_MC_TLB_MULTI) {
+			flush_tlb_power7(vcpu);
+			dsisr &= ~DSISR_MC_TLB_MULTI;
+		}
+		/* Any other errors we don't understand? */
+		if (dsisr & 0xffffffffUL)
+			handled = 0;
+	}
+
+	switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
+	case 0:
+		break;
+	case SRR1_MC_IFETCH_SLBPAR:
+	case SRR1_MC_IFETCH_SLBMULTI:
+	case SRR1_MC_IFETCH_SLBPARMULTI:
+		reload_slb(vcpu);
+		break;
+	case SRR1_MC_IFETCH_TLBMULTI:
+		flush_tlb_power7(vcpu);
+		break;
+	default:
+		handled = 0;
+	}
+
+	/*
+	 * See if OPAL has already handled the condition.
+	 * We assume that if the condition is recovered then OPAL
+	 * will have generated an error log event that we will pick
+	 * up and log later.
+	 */
+	opal_evt = local_paca->opal_mc_evt;
+	if (opal_evt->version == OpalMCE_V1 &&
+	    (opal_evt->severity == OpalMCE_SEV_NO_ERROR ||
+	     opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED))
+		handled = 1;
+
+	if (handled)
+		opal_evt->in_use = 0;
+
+	return handled;
+}
+
+long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_206))
+		return kvmppc_realmode_mc_power7(vcpu);
+
+	return 0;
+}


@@ -35,6 +35,37 @@ static void *real_vmalloc_addr(void *x)
 	return __va(addr);
 }

+/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
+static int global_invalidates(struct kvm *kvm, unsigned long flags)
+{
+	int global;
+
+	/*
+	 * If there is only one vcore, and it's currently running,
+	 * we can use tlbiel as long as we mark all other physical
+	 * cores as potentially having stale TLB entries for this lpid.
+	 * If we're not using MMU notifiers, we never take pages away
+	 * from the guest, so we can use tlbiel if requested.
+	 * Otherwise, don't use tlbiel.
+	 */
+	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcore)
+		global = 0;
+	else if (kvm->arch.using_mmu_notifiers)
+		global = 1;
+	else
+		global = !(flags & H_LOCAL);
+
+	if (!global) {
+		/* any other core might now have stale TLB entries... */
+		smp_wmb();
+		cpumask_setall(&kvm->arch.need_tlb_flush);
+		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
+				  &kvm->arch.need_tlb_flush);
+	}
+
+	return global;
+}
+
 /*
  * Add this HPTE into the chain for the real page.
  * Must be called with the chain locked; it unlocks the chain.
@@ -59,13 +90,24 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 		head->back = pte_index;
 	} else {
 		rev->forw = rev->back = pte_index;
-		i = pte_index;
+		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
+			pte_index | KVMPPC_RMAP_PRESENT;
 	}
-	smp_wmb();
-	*rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
+	unlock_rmap(rmap);
 }
 EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

+/*
+ * Note modification of an HPTE; set the HPTE modified bit
+ * if anyone is interested.
+ */
+static inline void note_hpte_modification(struct kvm *kvm,
+					  struct revmap_entry *rev)
+{
+	if (atomic_read(&kvm->arch.hpte_mod_interest))
+		rev->guest_rpte |= HPTE_GR_MODIFIED;
+}
+
 /* Remove this HPTE from the chain for a real page */
 static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 				struct revmap_entry *rev,
@@ -103,14 +145,14 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 	unlock_rmap(rmap);
 }

-static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
+static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
 			      int writing, unsigned long *pte_sizep)
 {
 	pte_t *ptep;
 	unsigned long ps = *pte_sizep;
 	unsigned int shift;

-	ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
+	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
 	if (!ptep)
 		return __pte(0);
 	if (shift)
@@ -130,15 +172,15 @@ static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
 	hpte[0] = hpte_v;
 }

-long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-		    long pte_index, unsigned long pteh, unsigned long ptel)
+long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
+		       long pte_index, unsigned long pteh, unsigned long ptel,
+		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
 {
-	struct kvm *kvm = vcpu->kvm;
 	unsigned long i, pa, gpa, gfn, psize;
 	unsigned long slot_fn, hva;
 	unsigned long *hpte;
 	struct revmap_entry *rev;
-	unsigned long g_ptel = ptel;
+	unsigned long g_ptel;
 	struct kvm_memory_slot *memslot;
 	unsigned long *physp, pte_size;
 	unsigned long is_io;
@@ -147,13 +189,14 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	unsigned int writing;
 	unsigned long mmu_seq;
 	unsigned long rcbits;
-	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

 	psize = hpte_page_size(pteh, ptel);
 	if (!psize)
 		return H_PARAMETER;
 	writing = hpte_is_writable(ptel);
 	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
+	ptel &= ~HPTE_GR_RESERVED;
+	g_ptel = ptel;

 	/* used later to detect if we might have been invalidated */
 	mmu_seq = kvm->mmu_notifier_seq;
@@ -201,7 +244,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,

 		/* Look up the Linux PTE for the backing page */
 		pte_size = psize;
-		pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
+		pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);
 		if (pte_present(pte)) {
 			if (writing && !pte_write(pte))
 				/* make the actual HPTE be read-only */
@@ -210,6 +253,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 			pa = pte_pfn(pte) << PAGE_SHIFT;
 		}
 	}
+
 	if (pte_size < psize)
 		return H_PARAMETER;
 	if (pa && pte_size > psize)
@@ -287,8 +331,10 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	rev = &kvm->arch.revmap[pte_index];
 	if (realmode)
 		rev = real_vmalloc_addr(rev);
-	if (rev)
+	if (rev) {
 		rev->guest_rpte = g_ptel;
+		note_hpte_modification(kvm, rev);
+	}

 	/* Link HPTE into reverse-map chain */
 	if (pteh & HPTE_V_VALID) {
@@ -297,7 +343,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 		lock_rmap(rmap);
 		/* Check for pending invalidations under the rmap chain lock */
 		if (kvm->arch.using_mmu_notifiers &&
-		    mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
+		    mmu_notifier_retry(kvm, mmu_seq)) {
 			/* inval in progress, write a non-present HPTE */
 			pteh |= HPTE_V_ABSENT;
 			pteh &= ~HPTE_V_VALID;
@@ -318,10 +364,17 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	hpte[0] = pteh;
 	asm volatile("ptesync" : : : "memory");

-	vcpu->arch.gpr[4] = pte_index;
+	*pte_idx_ret = pte_index;
 	return H_SUCCESS;
 }
-EXPORT_SYMBOL_GPL(kvmppc_h_enter);
+EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
+
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+		    long pte_index, unsigned long pteh, unsigned long ptel)
+{
+	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
+				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
+}

 #define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
@@ -343,11 +396,10 @@ static inline int try_lock_tlbie(unsigned int *lock)
 	return old == 0;
 }

-long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 			unsigned long pte_index, unsigned long avpn,
-		     unsigned long va)
+			unsigned long *hpret)
 {
-	struct kvm *kvm = vcpu->kvm;
 	unsigned long *hpte;
 	unsigned long v, r, rb;
 	struct revmap_entry *rev;
@@ -369,7 +421,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
 	if (v & HPTE_V_VALID) {
 		hpte[0] &= ~HPTE_V_VALID;
 		rb = compute_tlbie_rb(v, hpte[1], pte_index);
-		if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) {
+		if (global_invalidates(kvm, flags)) {
 			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
 				cpu_relax();
 			asm volatile("ptesync" : : : "memory");
@@ -385,13 +437,22 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
 		/* Read PTE low word after tlbie to get final R/C values */
 		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
 	}
-	r = rev->guest_rpte;
+	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
+	note_hpte_modification(kvm, rev);
 	unlock_hpte(hpte, 0);

-	vcpu->arch.gpr[4] = v;
-	vcpu->arch.gpr[5] = r;
+	hpret[0] = v;
+	hpret[1] = r;
 	return H_SUCCESS;
 }
+EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);
+
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+		     unsigned long pte_index, unsigned long avpn)
+{
+	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
+				  &vcpu->arch.gpr[4]);
+}

 long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 {
@@ -459,6 +520,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 			args[j] = ((0x80 | flags) << 56) + pte_index;

 		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+		note_hpte_modification(kvm, rev);

 		if (!(hp[0] & HPTE_V_VALID)) {
 			/* insert R and C bits from PTE */
@@ -534,8 +596,6 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 		return H_NOT_FOUND;
 	}

-	if (atomic_read(&kvm->online_vcpus) == 1)
-		flags |= H_LOCAL;
 	v = hpte[0];
 	bits = (flags << 55) & HPTE_R_PP0;
 	bits |= (flags << 48) & HPTE_R_KEY_HI;
@@ -548,6 +608,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	if (rev) {
 		r = (rev->guest_rpte & ~mask) | bits;
 		rev->guest_rpte = r;
+		note_hpte_modification(kvm, rev);
 	}
 	r = (hpte[1] & ~mask) | bits;
@@ -555,7 +616,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	if (v & HPTE_V_VALID) {
 		rb = compute_tlbie_rb(v, r, pte_index);
 		hpte[0] = v & ~HPTE_V_VALID;
-		if (!(flags & H_LOCAL)) {
+		if (global_invalidates(kvm, flags)) {
 			while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
 				cpu_relax();
 			asm volatile("ptesync" : : : "memory");
@@ -568,6 +629,28 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 			asm volatile("tlbiel %0" : : "r" (rb));
 			asm volatile("ptesync" : : : "memory");
 		}
+		/*
+		 * If the host has this page as readonly but the guest
+		 * wants to make it read/write, reduce the permissions.
+		 * Checking the host permissions involves finding the
+		 * memslot and then the Linux PTE for the page.
+		 */
+		if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
+			unsigned long psize, gfn, hva;
+			struct kvm_memory_slot *memslot;
+			pgd_t *pgdir = vcpu->arch.pgdir;
+			pte_t pte;
+
+			psize = hpte_page_size(v, r);
+			gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
+			memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
+			if (memslot) {
+				hva = __gfn_to_hva_memslot(memslot, gfn);
+				pte = lookup_linux_pte(pgdir, hva, 1, &psize);
+				if (pte_present(pte) && !pte_write(pte))
+					r = hpte_make_readonly(r);
+			}
+		}
 	}
 	hpte[1] = r;
 	eieio();
@@ -599,8 +682,10 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
 			v &= ~HPTE_V_ABSENT;
 			v |= HPTE_V_VALID;
 		}
-		if (v & HPTE_V_VALID)
+		if (v & HPTE_V_VALID) {
 			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
+			r &= ~HPTE_GR_RESERVED;
+		}
 		vcpu->arch.gpr[4 + i * 2] = v;
 		vcpu->arch.gpr[5 + i * 2] = r;
 	}


@ -27,6 +27,7 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/exception-64s.h> #include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h> #include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
/***************************************************************************** /*****************************************************************************
* * * *
@ -313,7 +314,33 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_SDR1,r6 /* switch to partition page table */ mtspr SPRN_SDR1,r6 /* switch to partition page table */
mtspr SPRN_LPID,r7 mtspr SPRN_LPID,r7
isync isync
/* See if we need to flush the TLB */
lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
clrldi r7,r6,64-6 /* extract bit number (6 bits) */
srdi r6,r6,6 /* doubleword number */
sldi r6,r6,3 /* address offset */
add r6,r6,r9
addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
li r0,1 li r0,1
sld r0,r0,r7
ld r7,0(r6)
and. r7,r7,r0
beq 22f
23: ldarx r7,0,r6 /* if set, clear the bit */
andc r7,r7,r0
stdcx. r7,0,r6
bne 23b
li r6,128 /* and flush the TLB */
mtctr r6
li r7,0x800 /* IS field = 0b10 */
ptesync
28: tlbiel r7
addi r7,r7,0x1000
bdnz 28b
ptesync
22: li r0,1
stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
b 10f b 10f
@ -336,36 +363,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mr r9,r4 mr r9,r4
blt hdec_soon blt hdec_soon
/*
* Invalidate the TLB if we could possibly have stale TLB
* entries for this partition on this core due to the use
* of tlbiel.
* XXX maybe only need this on primary thread?
*/
ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
lwz r5,VCPU_VCPUID(r4)
lhz r6,PACAPACAINDEX(r13)
rldimi r6,r5,0,62 /* XXX map as if threads 1:1 p:v */
lhz r8,VCPU_LAST_CPU(r4)
sldi r7,r6,1 /* see if this is the same vcpu */
add r7,r7,r9 /* as last ran on this pcpu */
lhz r0,KVM_LAST_VCPU(r7)
cmpw r6,r8 /* on the same cpu core as last time? */
bne 3f
cmpw r0,r5 /* same vcpu as this core last ran? */
beq 1f
3: sth r6,VCPU_LAST_CPU(r4) /* if not, invalidate partition TLB */
sth r5,KVM_LAST_VCPU(r7)
li r6,128
mtctr r6
li r7,0x800 /* IS field = 0b10 */
ptesync
2: tlbiel r7
addi r7,r7,0x1000
bdnz 2b
ptesync
1:
/* Save purr/spurr */ /* Save purr/spurr */
mfspr r5,SPRN_PURR mfspr r5,SPRN_PURR
mfspr r6,SPRN_SPURR mfspr r6,SPRN_SPURR
@ -682,8 +679,7 @@ BEGIN_FTR_SECTION
1: 1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
nohpte_cont: guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Save DEC */ /* Save DEC */
mfspr r5,SPRN_DEC mfspr r5,SPRN_DEC
mftb r6 mftb r6
@ -704,6 +700,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r6, VCPU_FAULT_DAR(r9) std r6, VCPU_FAULT_DAR(r9)
stw r7, VCPU_FAULT_DSISR(r9) stw r7, VCPU_FAULT_DSISR(r9)
/* See if it is a machine check */
cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
beq machine_check_realmode
mc_cont:
/* Save guest CTRL register, set runlatch to 1 */ /* Save guest CTRL register, set runlatch to 1 */
6: mfspr r6,SPRN_CTRLF 6: mfspr r6,SPRN_CTRLF
stw r6,VCPU_CTRL(r9) stw r6,VCPU_CTRL(r9)
@@ -1116,38 +1117,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	/*
 	 * For external and machine check interrupts, we need
 	 * to call the Linux handler to process the interrupt.
-	 * We do that by jumping to the interrupt vector address
-	 * which we have in r12.  The [h]rfid at the end of the
+	 * We do that by jumping to absolute address 0x500 for
+	 * external interrupts, or the machine_check_fwnmi label
+	 * for machine checks (since firmware might have patched
+	 * the vector area at 0x200).  The [h]rfid at the end of the
 	 * handler will return to the book3s_hv_interrupts.S code.
 	 * For other interrupts we do the rfid to get back
-	 * to the book3s_interrupts.S code here.
+	 * to the book3s_hv_interrupts.S code here.
 	 */
 	ld	r8, HSTATE_VMHANDLER(r13)
 	ld	r7, HSTATE_HOST_MSR(r13)
 
+	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
 	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+BEGIN_FTR_SECTION
 	beq	11f
-	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* RFI into the highmem handler, or branch to interrupt handler */
-12:	mfmsr	r6
-	mtctr	r12
+	mfmsr	r6
 	li	r0, MSR_RI
 	andc	r6, r6, r0
 	mtmsrd	r6, 1			/* Clear RI in MSR */
 	mtsrr0	r8
 	mtsrr1	r7
-	beqctr
+	beqa	0x500			/* external interrupt (PPC970) */
+	beq	cr1, 13f		/* machine check */
 	RFI
 
-11:
-BEGIN_FTR_SECTION
-	b	12b
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-	mtspr	SPRN_HSRR0, r8
+	/* On POWER7, we have external interrupts set to use HSRR0/1 */
+11:	mtspr	SPRN_HSRR0, r8
 	mtspr	SPRN_HSRR1, r7
 	ba	0x500
+
+13:	b	machine_check_fwnmi
 /*
  * Check whether an HDSI is an HPTE not found fault or something else.
  * If it is an HPTE not found fault that is due to the guest accessing
@@ -1180,7 +1184,7 @@ kvmppc_hdsi:
 	cmpdi	r3, 0			/* retry the instruction */
 	beq	6f
 	cmpdi	r3, -1			/* handle in kernel mode */
-	beq	nohpte_cont
+	beq	guest_exit_cont
 	cmpdi	r3, -2			/* MMIO emulation; need instr word */
 	beq	2f
 
@@ -1194,6 +1198,7 @@ kvmppc_hdsi:
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
 	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
 	rotldi	r11, r11, 63
+fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
 	mtctr	r7
 
@@ -1226,7 +1231,7 @@ kvmppc_hdsi:
 	/* Unset guest mode. */
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
-	b	nohpte_cont
+	b	guest_exit_cont
 /*
  * Similarly for an HISI, reflect it to the guest as an ISI unless
@@ -1252,9 +1257,9 @@ kvmppc_hisi:
 	ld	r11, VCPU_MSR(r9)
 	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
 	cmpdi	r3, 0			/* retry the instruction */
-	beq	6f
+	beq	fast_interrupt_c_return
 	cmpdi	r3, -1			/* handle in kernel mode */
-	beq	nohpte_cont
+	beq	guest_exit_cont
 
 	/* Synthesize an ISI for the guest */
 	mr	r11, r3
@@ -1263,12 +1268,7 @@ kvmppc_hisi:
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
 	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
 	rotldi	r11, r11, 63
-6:	ld	r7, VCPU_CTR(r9)
-	lwz	r8, VCPU_XER(r9)
-	mtctr	r7
-	mtxer	r8
-	mr	r4, r9
-	b	fast_guest_return
+	b	fast_interrupt_c_return
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
 	ld	r5, KVM_VRMA_SLB_V(r6)
 
@@ -1284,14 +1284,14 @@ kvmppc_hisi:
 hcall_try_real_mode:
 	ld	r3,VCPU_GPR(R3)(r9)
 	andi.	r0,r11,MSR_PR
-	bne	hcall_real_cont
+	bne	guest_exit_cont
 	clrrdi	r3,r3,2
 	cmpldi	r3,hcall_real_table_end - hcall_real_table
-	bge	hcall_real_cont
+	bge	guest_exit_cont
 	LOAD_REG_ADDR(r4, hcall_real_table)
 	lwzx	r3,r3,r4
 	cmpwi	r3,0
-	beq	hcall_real_cont
+	beq	guest_exit_cont
 	add	r3,r3,r4
 	mtctr	r3
 	mr	r3,r9		/* get vcpu pointer */
@@ -1312,7 +1312,7 @@ hcall_real_fallback:
 	li	r12,BOOK3S_INTERRUPT_SYSCALL
 	ld	r9, HSTATE_KVM_VCPU(r13)
 
-	b	hcall_real_cont
+	b	guest_exit_cont
 
 	.globl	hcall_real_table
 hcall_real_table:
@@ -1571,6 +1571,21 @@ kvm_cede_exit:
 	li	r3,H_TOO_HARD
 	blr
 
+	/* Try to handle a machine check in real mode */
+machine_check_realmode:
+	mr	r3, r9		/* get vcpu pointer */
+	bl	.kvmppc_realmode_machine_check
+	nop
+	cmpdi	r3, 0		/* continue exiting from guest? */
+	ld	r9, HSTATE_KVM_VCPU(r13)
+	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+	beq	mc_cont
+	/* If not, deliver a machine check.  SRR0/1 are already set */
+	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
+	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
+	rotldi	r11, r11, 63
+	b	fast_interrupt_c_return
+
 secondary_too_late:
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	HMT_LOW
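A note on the idiom used by the synthesized-interrupt paths above: li r11, (MSR_ME << 1) | 1 followed by rotldi r11, r11, 63 builds MSR_SF | MSR_ME without needing a 64-bit immediate. A standalone check of the arithmetic (MSR bit values assumed from the usual Book3S layout):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t msr_me = 0x1000;		/* assumed: MSR_ME is bit 12 */
	uint64_t r11 = (msr_me << 1) | 1;	/* 0x2001 */

	/* rotldi r11,r11,63: rotate the 64-bit value left by 63 bits */
	r11 = (r11 << 63) | (r11 >> 1);

	/* prints 0x8000000000001000, i.e. MSR_SF | MSR_ME */
	printf("synthesized guest MSR = 0x%016llx\n", (unsigned long long)r11);
	return 0;
}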


@@ -81,9 +81,7 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 	svcpu_put(svcpu);
 #endif
 
-	kvmppc_giveup_ext(vcpu, MSR_FP);
-	kvmppc_giveup_ext(vcpu, MSR_VEC);
-	kvmppc_giveup_ext(vcpu, MSR_VSX);
+	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
 	vcpu->cpu = -1;
 }
 
@@ -147,7 +145,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 	ulong smsr = vcpu->arch.shared->msr;
 
 	/* Guest MSR values */
-	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
+	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
 	/* Process MSR values */
 	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
 	/* External providers the guest reserved */
 
@@ -433,10 +431,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 static inline int get_fpr_index(int i)
 {
-#ifdef CONFIG_VSX
-	i *= 2;
-#endif
-	return i;
+	return i * TS_FPRWIDTH;
 }
 
 /* Give up external provider (FPU, Altivec, VSX) */
@@ -450,41 +445,49 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 	u64 *thread_fpr = (u64*)t->fpr;
 	int i;
 
-	if (!(vcpu->arch.guest_owned_ext & msr))
+	/*
+	 * VSX instructions can access FP and vector registers, so if
+	 * we are giving up VSX, make sure we give up FP and VMX as well.
+	 */
+	if (msr & MSR_VSX)
+		msr |= MSR_FP | MSR_VEC;
+
+	msr &= vcpu->arch.guest_owned_ext;
+	if (!msr)
 		return;
 
 #ifdef DEBUG_EXT
 	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
 #endif
 
-	switch (msr) {
-	case MSR_FP:
+	if (msr & MSR_FP) {
+		/*
+		 * Note that on CPUs with VSX, giveup_fpu stores
+		 * both the traditional FP registers and the added VSX
+		 * registers into thread.fpr[].
+		 */
 		giveup_fpu(current);
 		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
 			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
 
 		vcpu->arch.fpscr = t->fpscr.val;
-		break;
-	case MSR_VEC:
+
+#ifdef CONFIG_VSX
+		if (cpu_has_feature(CPU_FTR_VSX))
+			for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
+				vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+	}
+
 #ifdef CONFIG_ALTIVEC
+	if (msr & MSR_VEC) {
 		giveup_altivec(current);
 		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
 		vcpu->arch.vscr = t->vscr;
-#endif
-		break;
-	case MSR_VSX:
-#ifdef CONFIG_VSX
-		__giveup_vsx(current);
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
-			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
-		break;
-	default:
-		BUG();
 	}
+#endif
 
-	vcpu->arch.guest_owned_ext &= ~msr;
-	current->thread.regs->msr &= ~msr;
+	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
 	kvmppc_recalc_shadow_msr(vcpu);
 }
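The rework above replaces the single-facility switch with mask algebra: a request to give up VSX is first widened to FP and VMX (VSX instructions operate on both register files), then intersected with what the guest actually owns, so only live state is flushed. A self-contained illustration of the same computation (the MSR_* values here are assumptions for the demo; the real ones come from asm/reg.h):

#include <stdio.h>

#define MSR_FP	0x2000UL	/* assumed bit values */
#define MSR_VEC	0x02000000UL
#define MSR_VSX	0x00800000UL

int main(void)
{
	unsigned long guest_owned_ext = MSR_FP | MSR_VSX;
	unsigned long msr = MSR_VSX;		/* caller gives up VSX only */

	if (msr & MSR_VSX)			/* widen to FP + VMX */
		msr |= MSR_FP | MSR_VEC;

	msr &= guest_owned_ext;			/* flush only what is owned */

	printf("facilities to flush: 0x%lx\n", msr);	/* FP | VSX here */
	return 0;
}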
@@ -544,47 +547,56 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 		return RESUME_GUEST;
 	}
 
-	/* We already own the ext */
-	if (vcpu->arch.guest_owned_ext & msr) {
-		return RESUME_GUEST;
+	if (msr == MSR_VSX) {
+		/* No VSX?  Give an illegal instruction interrupt */
+#ifdef CONFIG_VSX
+		if (!cpu_has_feature(CPU_FTR_VSX))
+#endif
+		{
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			return RESUME_GUEST;
+		}
+
+		/*
+		 * We have to load up all the FP and VMX registers before
+		 * we can let the guest use VSX instructions.
+		 */
+		msr = MSR_FP | MSR_VEC | MSR_VSX;
 	}
 
+	/* See if we already own all the ext(s) needed */
+	msr &= ~vcpu->arch.guest_owned_ext;
+	if (!msr)
+		return RESUME_GUEST;
+
 #ifdef DEBUG_EXT
 	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
 #endif
 
 	current->thread.regs->msr |= msr;
 
-	switch (msr) {
-	case MSR_FP:
+	if (msr & MSR_FP) {
 		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
 			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
+#ifdef CONFIG_VSX
+		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
+			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+#endif
 		t->fpscr.val = vcpu->arch.fpscr;
 		t->fpexc_mode = 0;
 		kvmppc_load_up_fpu();
-		break;
-	case MSR_VEC:
+	}
+
+	if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
 		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
 		t->vscr = vcpu->arch.vscr;
 		t->vrsave = -1;
 		kvmppc_load_up_altivec();
 #endif
-		break;
-	case MSR_VSX:
-#ifdef CONFIG_VSX
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
-			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
-		kvmppc_load_up_vsx();
-#endif
-		break;
-	default:
-		BUG();
 	}
 
 	vcpu->arch.guest_owned_ext |= msr;
 	kvmppc_recalc_shadow_msr(vcpu);
 
 	return RESUME_GUEST;
@@ -1134,7 +1146,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* Save VSX state in stack */
 	used_vsr = current->thread.used_vsr;
 	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
-			__giveup_vsx(current);
+		__giveup_vsx(current);
 #endif
 
 	/* Remember the MSR with disabled extensions */
 
@@ -1151,14 +1163,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* No need for kvm_guest_exit. It's done in handle_exit.
 	   We also get here with interrupts enabled. */
 
+	/* Make sure we save the guest FPU/Altivec/VSX state */
+	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
+
 	current->thread.regs->msr = ext_msr;
 
-	/* Make sure we save the guest FPU/Altivec/VSX state */
-	kvmppc_giveup_ext(vcpu, MSR_FP);
-	kvmppc_giveup_ext(vcpu, MSR_VEC);
-	kvmppc_giveup_ext(vcpu, MSR_VSX);
-
-	/* Restore FPU state from stack */
+	/* Restore FPU/VSX state from stack */
 	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
 	current->thread.fpscr.val = fpscr;
 	current->thread.fpexc_mode = fpexc_mode;


@@ -234,8 +234,5 @@ define_load_up(fpu)
 #ifdef CONFIG_ALTIVEC
 define_load_up(altivec)
 #endif
-#ifdef CONFIG_VSX
-define_load_up(vsx)
-#endif
 
 #include "book3s_segment.S"


@@ -36,6 +36,7 @@
 #include <asm/dbell.h>
 #include <asm/hw_irq.h>
 #include <asm/irq.h>
+#include <asm/time.h>
 
 #include "timing.h"
 #include "booke.h"
 
@@ -311,6 +312,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	bool crit;
 	bool keep_irq = false;
 	enum int_class int_class;
+	ulong new_msr = vcpu->arch.shared->msr;
 
 	/* Truncate crit indicators in 32 bit mode */
 	if (!(vcpu->arch.shared->msr & MSR_SF)) {
 
@@ -406,7 +408,13 @@
 			set_guest_esr(vcpu, vcpu->arch.queued_esr);
 		if (update_dear == true)
 			set_guest_dear(vcpu, vcpu->arch.queued_dear);
-		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
+
+		new_msr &= msr_mask;
+#if defined(CONFIG_64BIT)
+		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
+			new_msr |= MSR_CM;
+#endif
+		kvmppc_set_msr(vcpu, new_msr);
 
 		if (!keep_irq)
 			clear_bit(priority, &vcpu->arch.pending_exceptions);
 
@@ -1380,6 +1388,11 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 			&vcpu->arch.dbg_reg.dac[dac], sizeof(u64));
 		break;
 	}
+#if defined(CONFIG_64BIT)
+	case KVM_REG_PPC_EPCR:
+		r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
+		break;
+#endif
 	default:
 		break;
 	}
 
@@ -1407,6 +1420,15 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 			(u64 __user *)(long)reg->addr, sizeof(u64));
 		break;
 	}
+#if defined(CONFIG_64BIT)
+	case KVM_REG_PPC_EPCR: {
+		u32 new_epcr;
+		r = get_user(new_epcr, (u32 __user *)(long)reg->addr);
+		if (r == 0)
+			kvmppc_set_epcr(vcpu, new_epcr);
+		break;
+	}
+#endif
 	default:
 		break;
 	}
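With the two one-reg cases above, userspace can read and set the guest's EPCR through the existing KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls. A hedged sketch of the set side (assumes a ppc64 build whose uapi headers define KVM_REG_PPC_EPCR; the register is 32 bits wide, so the address of a u32 is passed):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_guest_epcr(int vcpu_fd, uint32_t epcr)
{
	struct kvm_one_reg reg;

	memset(&reg, 0, sizeof(reg));
	reg.id = KVM_REG_PPC_EPCR;
	reg.addr = (uintptr_t)&epcr;	/* kernel does get_user() on this */

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Setting SPRN_EPCR_ICM through this path is what makes the irqprio code above deliver interrupts with MSR_CM set, that is, in the guest's 64-bit computation mode.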
@@ -1465,6 +1487,18 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 }
 
+void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
+{
+#if defined(CONFIG_64BIT)
+	vcpu->arch.epcr = new_epcr;
+#ifdef CONFIG_KVM_BOOKE_HV
+	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
+	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
+		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
+#endif
+#endif
+}
+
 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
 {
 	vcpu->arch.tcr = new_tcr;


@@ -69,6 +69,7 @@ extern unsigned long kvmppc_booke_handlers;
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
 void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
+void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr);
 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
 void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);


@@ -240,7 +240,14 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 	case SPRN_MCSR:
 		vcpu->arch.mcsr &= ~spr_val;
 		break;
+#if defined(CONFIG_64BIT)
+	case SPRN_EPCR:
+		kvmppc_set_epcr(vcpu, spr_val);
+#ifdef CONFIG_KVM_BOOKE_HV
+		mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
+#endif
+		break;
+#endif
 	default:
 		emulated = EMULATE_FAIL;
 	}
 
@@ -335,6 +342,11 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 	case SPRN_MCSR:
 		*spr_val = vcpu->arch.mcsr;
 		break;
+#if defined(CONFIG_64BIT)
+	case SPRN_EPCR:
+		*spr_val = vcpu->arch.epcr;
+		break;
+#endif
 	default:
 		emulated = EMULATE_FAIL;


@@ -16,6 +16,7 @@
  *
  * Author: Varun Sethi <varun.sethi@freescale.com>
  * Author: Scott Wood <scotwood@freescale.com>
+ * Author: Mihai Caraman <mihai.caraman@freescale.com>
  *
  * This file is derived from arch/powerpc/kvm/booke_interrupts.S
  */
 
@@ -30,31 +31,33 @@
 #include <asm/bitsperlong.h>
 #include <asm/thread_info.h>
 
+#ifdef CONFIG_64BIT
+#include <asm/exception-64e.h>
+#else
 #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
-
-#define GET_VCPU(vcpu, thread)	\
-	PPC_LL	vcpu, THREAD_KVM_VCPU(thread)
+#endif
 
 #define LONGBYTES		(BITS_PER_LONG / 8)
 
 #define VCPU_GUEST_SPRG(n)	(VCPU_GUEST_SPRGS + (n * LONGBYTES))
 
 /* The host stack layout: */
-#define HOST_R1		(0 * LONGBYTES) /* Implied by stwu. */
-#define HOST_CALLEE_LR	(1 * LONGBYTES)
-#define HOST_RUN	(2 * LONGBYTES) /* struct kvm_run */
+#define HOST_R1		0 /* Implied by stwu. */
+#define HOST_CALLEE_LR	PPC_LR_STKOFF
+#define HOST_RUN	(HOST_CALLEE_LR + LONGBYTES)
 /*
  * r2 is special: it holds 'current', and it made nonvolatile in the
  * kernel with the -ffixed-r2 gcc option.
  */
-#define HOST_R2		(3 * LONGBYTES)
-#define HOST_CR		(4 * LONGBYTES)
-#define HOST_NV_GPRS	(5 * LONGBYTES)
+#define HOST_R2		(HOST_RUN + LONGBYTES)
+#define HOST_CR		(HOST_R2 + LONGBYTES)
+#define HOST_NV_GPRS	(HOST_CR + LONGBYTES)
 #define __HOST_NV_GPR(n)	(HOST_NV_GPRS + ((n - 14) * LONGBYTES))
 #define HOST_NV_GPR(n)	__HOST_NV_GPR(__REG_##n)
 #define HOST_MIN_STACK_SIZE	(HOST_NV_GPR(R31) + LONGBYTES)
 #define HOST_STACK_SIZE	((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
-#define HOST_STACK_LR	(HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */
+/* LR in caller stack frame. */
+#define HOST_STACK_LR	(HOST_STACK_SIZE + PPC_LR_STKOFF)
 
 #define NEED_EMU		0x00000001 /* emulation -- save nv regs */
 #define NEED_DEAR		0x00000002 /* save faulting DEAR */
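The switch from fixed (n * LONGBYTES) slots to a chain anchored at PPC_LR_STKOFF lets the same layout serve both word sizes. As a worked example, under the assumption of a 64-bit build where LONGBYTES is 8 and PPC_LR_STKOFF is 16 (both values come from the kernel headers in reality):

#include <stdio.h>

#define LONGBYTES	8	/* assumed: 64-bit build */
#define PPC_LR_STKOFF	16	/* assumed ABI value */

#define HOST_R1		0
#define HOST_CALLEE_LR	PPC_LR_STKOFF
#define HOST_RUN	(HOST_CALLEE_LR + LONGBYTES)
#define HOST_R2		(HOST_RUN + LONGBYTES)
#define HOST_CR		(HOST_R2 + LONGBYTES)
#define HOST_NV_GPRS	(HOST_CR + LONGBYTES)

int main(void)
{
	/* prints RUN=24 R2=32 CR=40 NV_GPRS=48 */
	printf("RUN=%d R2=%d CR=%d NV_GPRS=%d\n",
	       HOST_RUN, HOST_R2, HOST_CR, HOST_NV_GPRS);
	return 0;
}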
@@ -201,12 +204,128 @@
 	b	kvmppc_resume_host
 .endm
 
+#ifdef CONFIG_64BIT
+/* Exception types */
+#define EX_GEN			1
+#define EX_GDBELL		2
+#define EX_DBG			3
+#define EX_MC			4
+#define EX_CRIT			5
+#define EX_TLB			6
+
+/*
+ * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
+ */
+.macro kvm_handler intno type scratch, paca_ex, ex_r10, ex_r11, srr0, srr1, flags
+_GLOBAL(kvmppc_handler_\intno\()_\srr1)
+	mr	r11, r4
+	/*
+	 * Get vcpu from Paca: paca->__current.thread->kvm_vcpu
+	 */
+	PPC_LL	r4, PACACURRENT(r13)
+	PPC_LL	r4, (THREAD + THREAD_KVM_VCPU)(r4)
+	stw	r10, VCPU_CR(r4)
+	PPC_STL	r11, VCPU_GPR(R4)(r4)
+	PPC_STL	r5, VCPU_GPR(R5)(r4)
+	.if \type == EX_CRIT
+	PPC_LL	r5, (\paca_ex + EX_R13)(r13)
+	.else
+	mfspr	r5, \scratch
+	.endif
+	PPC_STL	r6, VCPU_GPR(R6)(r4)
+	PPC_STL	r8, VCPU_GPR(R8)(r4)
+	PPC_STL	r9, VCPU_GPR(R9)(r4)
+	PPC_STL	r5, VCPU_GPR(R13)(r4)
+	PPC_LL	r6, (\paca_ex + \ex_r10)(r13)
+	PPC_LL	r8, (\paca_ex + \ex_r11)(r13)
+	PPC_STL	r3, VCPU_GPR(R3)(r4)
+	PPC_STL	r7, VCPU_GPR(R7)(r4)
+	PPC_STL	r12, VCPU_GPR(R12)(r4)
+	PPC_STL	r6, VCPU_GPR(R10)(r4)
+	PPC_STL	r8, VCPU_GPR(R11)(r4)
+	mfctr	r5
+	PPC_STL	r5, VCPU_CTR(r4)
+	mfspr	r5, \srr0
+	mfspr	r6, \srr1
+	kvm_handler_common \intno, \srr0, \flags
+.endm
+
+#define EX_PARAMS(type) \
+	EX_##type, \
+	SPRN_SPRG_##type##_SCRATCH, \
+	PACA_EX##type, \
+	EX_R10, \
+	EX_R11
+
+#define EX_PARAMS_TLB \
+	EX_TLB, \
+	SPRN_SPRG_GEN_SCRATCH, \
+	PACA_EXTLB, \
+	EX_TLB_R10, \
+	EX_TLB_R11
+
+kvm_handler BOOKE_INTERRUPT_CRITICAL, EX_PARAMS(CRIT), \
+	SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_MACHINE_CHECK, EX_PARAMS(MC), \
+	SPRN_MCSRR0, SPRN_MCSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1,(NEED_EMU | NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_INST_STORAGE, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, NEED_ESR
+kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1,(NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1,NEED_ESR
+kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DECREMENTER, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_FIT, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_WATCHDOG, EX_PARAMS(CRIT),\
+	SPRN_CSRR0, SPRN_CSRR1, 0
+/*
+ * Only bolted TLB miss exception handlers are supported for now
+ */
+kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \
+	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DOORBELL, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, EX_PARAMS(CRIT), \
+	SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_HV_PRIV, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, NEED_EMU
+kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, EX_PARAMS(GEN), \
+	SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, EX_PARAMS(GDBELL), \
+	SPRN_GSRR0, SPRN_GSRR1, 0
+kvm_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, EX_PARAMS(CRIT), \
+	SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \
+	SPRN_DSRR0, SPRN_DSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \
+	SPRN_CSRR0, SPRN_CSRR1, 0
+#else
 /*
  * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
  */
 .macro kvm_handler intno srr0, srr1, flags
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
-	GET_VCPU(r11, r10)
+	PPC_LL	r11, THREAD_KVM_VCPU(r10)
 	PPC_STL	r3, VCPU_GPR(R3)(r11)
 	mfspr	r3, SPRN_SPRG_RSCRATCH0
 	PPC_STL	r4, VCPU_GPR(R4)(r11)
 
@@ -233,7 +352,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 .macro kvm_lvl_handler intno scratch srr0, srr1, flags
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	mfspr	r10, SPRN_SPRG_THREAD
-	GET_VCPU(r11, r10)
+	PPC_LL	r11, THREAD_KVM_VCPU(r10)
 	PPC_STL	r3, VCPU_GPR(R3)(r11)
 	mfspr	r3, \scratch
 	PPC_STL	r4, VCPU_GPR(R4)(r11)
 
@@ -295,7 +414,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
 	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
 kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
 	SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
+#endif
 
 /* Registers:
  * SPRG_SCRATCH0: guest r10
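To see how the new 64-bit handler table feeds the nine-parameter macro, expand one instantiation by hand: EX_PARAMS(GEN) token-pastes into

	EX_GEN, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN, EX_R10, EX_R11

so a line such as "kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), SPRN_SRR0, SPRN_SRR1, 0" supplies the type, scratch, paca_ex, ex_r10 and ex_r11 arguments in one go, followed by srr0, srr1 and flags. The TLB-miss handlers use the separately spelled EX_PARAMS_TLB because their saved registers live in PACA_EXTLB under the distinct EX_TLB_R10/EX_TLB_R11 offsets.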


@@ -129,9 +129,9 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
 		ulong value);
 int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
 int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
-int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb);
-int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb);
-int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb);
+int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
+int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
 int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
 void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
 
@@ -154,7 +154,7 @@ get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
 static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
 {
-	return tlbe->mas2 & 0xfffff000;
+	return tlbe->mas2 & MAS2_EPN;
 }
 
 static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)


@@ -89,6 +89,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int ra = get_ra(inst);
 	int rb = get_rb(inst);
 	int rt = get_rt(inst);
+	gva_t ea;
 
 	switch (get_op(inst)) {
 	case 31:
 
@@ -113,15 +114,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 
 		case XOP_TLBSX:
-			emulated = kvmppc_e500_emul_tlbsx(vcpu,rb);
+			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
+			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
 			break;
 
-		case XOP_TLBILX:
-			emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb);
+		case XOP_TLBILX: {
+			int type = rt & 0x3;
+			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
+			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
 			break;
+		}
 
 		case XOP_TLBIVAX:
-			emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
+			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
+			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
 			break;
 
 		default:
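All three cases now compute the effective address once, in kvmppc_core_emulate_op(), instead of passing raw register numbers down. A self-contained sketch of the indexed-form (ra|0) + (rb) computation such a helper performs (hypothetical stand-in for kvmppc_get_ea_indexed; elsewhere in this series the result's high 32 bits are additionally masked when the guest's MSR.CM is clear, which is not shown here):

/* Indexed-form effective address: ea = (ra ? GPR[ra] : 0) + GPR[rb] */
static unsigned long get_ea_indexed_sketch(const unsigned long *gpr, int ra, int rb)
{
	unsigned long ea = gpr[rb];

	if (ra)
		ea += gpr[ra];	/* ra == 0 means a literal zero, not GPR0 */
	return ea;
}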


@@ -415,7 +415,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	struct tlbe_ref *ref)
 {
 	struct kvm_memory_slot *slot;
-	unsigned long pfn, hva;
+	unsigned long pfn = 0; /* silence GCC warning */
+	unsigned long hva;
 	int pfnmap = 0;
 	int tsize = BOOK3E_PAGESZ_4K;
 
@@ -688,14 +689,11 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
 	return EMULATE_DONE;
 }
 
-int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
+int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	unsigned int ia;
 	int esel, tlbsel;
-	gva_t ea;
-
-	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);
 
 	ia = (ea >> 2) & 0x1;
 
@@ -722,7 +720,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
 }
 
 static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
-		       int pid, int rt)
+		       int pid, int type)
 {
 	struct kvm_book3e_206_tlb_entry *tlbe;
 	int tid, esel;
 
@@ -731,7 +729,7 @@ static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 	for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
 		tlbe = get_entry(vcpu_e500, tlbsel, esel);
 		tid = get_tlb_tid(tlbe);
-		if (rt == 0 || tid == pid) {
+		if (type == 0 || tid == pid) {
 			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
 			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
 		}
 
@@ -739,14 +737,9 @@ static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 }
 
 static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
-		       int ra, int rb)
+		       gva_t ea)
 {
 	int tlbsel, esel;
-	gva_t ea;
-
-	ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb);
-	if (ra)
-		ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra);
 
 	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
 		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
 
@@ -758,16 +751,16 @@ static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
 	}
 }
 
-int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb)
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int pid = get_cur_spid(vcpu);
 
-	if (rt == 0 || rt == 1) {
-		tlbilx_all(vcpu_e500, 0, pid, rt);
-		tlbilx_all(vcpu_e500, 1, pid, rt);
-	} else if (rt == 3) {
-		tlbilx_one(vcpu_e500, pid, ra, rb);
+	if (type == 0 || type == 1) {
+		tlbilx_all(vcpu_e500, 0, pid, type);
+		tlbilx_all(vcpu_e500, 1, pid, type);
+	} else if (type == 3) {
+		tlbilx_one(vcpu_e500, pid, ea);
 	}
 
 	return EMULATE_DONE;
 
@@ -792,16 +785,13 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
 	return EMULATE_DONE;
 }
 
-int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
+int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int as = !!get_cur_sas(vcpu);
 	unsigned int pid = get_cur_spid(vcpu);
 	int esel, tlbsel;
 	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
-	gva_t ea;
-
-	ea = kvmppc_get_gpr(vcpu, rb);
 
 	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
 		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
 
@@ -881,6 +871,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 
 	gtlbe->mas1 = vcpu->arch.shared->mas1;
 	gtlbe->mas2 = vcpu->arch.shared->mas2;
+	if (!(vcpu->arch.shared->msr & MSR_CM))
+		gtlbe->mas2 &= 0xffffffffUL;
 	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
 
 	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
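The new MSR.CM test keeps a 32-bit guest from writing stale high EPN bits into a TLB entry. Numerically (the example value is arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mas2 = 0x00000001f0001000ULL;	/* guest-supplied MAS2 */

	/* guest in 32-bit mode: MSR.CM clear, so the high word is dropped */
	mas2 &= 0xffffffffULL;

	/* prints 0xf0001000 */
	printf("mas2 = 0x%llx\n", (unsigned long long)mas2);
	return 0;
}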


@@ -314,6 +314,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_PPC_IRQ_LEVEL:
 	case KVM_CAP_ENABLE_CAP:
 	case KVM_CAP_ONE_REG:
+	case KVM_CAP_IOEVENTFD:
 		r = 1;
 		break;
 #ifndef CONFIG_KVM_BOOK3S_64_HV
 
@@ -353,6 +354,12 @@
 		r = 1;
 #else
 		r = 0;
+		break;
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	case KVM_CAP_PPC_HTAB_FD:
+		r = 1;
+		break;
 #endif
 		break;
 	case KVM_CAP_NR_VCPUS:
 
@@ -618,6 +625,13 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	vcpu->mmio_is_write = 0;
 	vcpu->arch.mmio_sign_extend = 0;
 
+	if (!kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+			     bytes, &run->mmio.data)) {
+		kvmppc_complete_mmio_load(vcpu, run);
+		vcpu->mmio_needed = 0;
+		return EMULATE_DONE;
+	}
+
 	return EMULATE_DO_MMIO;
 }
 
@@ -627,8 +641,8 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
 	int r;
 
-	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
 	vcpu->arch.mmio_sign_extend = 1;
+	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
 
 	return r;
 }
 
@@ -666,6 +680,13 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		}
 	}
 
+	if (!kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
+			      bytes, &run->mmio.data)) {
+		kvmppc_complete_mmio_load(vcpu, run);
+		vcpu->mmio_needed = 0;
+		return EMULATE_DONE;
+	}
+
 	return EMULATE_DO_MMIO;
 }
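Two things happen in these MMIO hunks. First, kvmppc_handle_loads() now sets mmio_sign_extend before calling kvmppc_handle_load(): with the kvm_io_bus_read() check added above, the load can complete synchronously inside that call, so the flag has to be in place already. Second, an access that matches a registered ioeventfd is now completed entirely in the kernel. A hedged userspace sketch of registering one (standard struct kvm_ioeventfd fields; error handling elided):

#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Have 4-byte guest writes to 'gpa' signal an eventfd instead of exiting. */
static int register_ioeventfd(int vm_fd, uint64_t gpa)
{
	struct kvm_ioeventfd ioev;

	memset(&ioev, 0, sizeof(ioev));
	ioev.addr = gpa;
	ioev.len  = 4;
	ioev.fd   = eventfd(0, 0);

	return ioctl(vm_fd, KVM_IOEVENTFD, &ioev);
}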
@@ -939,6 +960,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = 0;
 		break;
 	}
+
+	case KVM_PPC_GET_HTAB_FD: {
+		struct kvm *kvm = filp->private_data;
+		struct kvm_get_htab_fd ghf;
+
+		r = -EFAULT;
+		if (copy_from_user(&ghf, argp, sizeof(ghf)))
+			break;
+		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
+		break;
+	}
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
 
 #ifdef CONFIG_PPC_BOOK3S_64
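The new case just validates the user buffer and hands off to kvm_vm_ioctl_get_htab_fd(). A hedged sketch of the userspace side, using the struct and flag bits the uapi header gains later in this diff (error handling elided):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Open a read-only HPT stream for bolted entries and read one chunk. */
static long dump_htab_start(int vm_fd, void *buf, size_t len)
{
	struct kvm_get_htab_fd ghf;
	int fd;

	memset(&ghf, 0, sizeof(ghf));
	ghf.flags = KVM_GET_HTAB_BOLTED_ONLY;
	ghf.start_index = 0;

	fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
	if (fd < 0)
		return fd;

	return read(fd, buf, len);	/* 8-byte header + 16-byte entries */
}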


@@ -901,10 +901,20 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 
 void kvm_eventfd_init(struct kvm *kvm);
+int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
 void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
-int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+#else
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
+{
+	return -EINVAL;
+}
+
+static inline void kvm_irqfd_release(struct kvm *kvm) {}
+#endif
 
 #else


@@ -634,6 +634,7 @@ struct kvm_ppc_smmu_info {
 #endif
 #define KVM_CAP_IRQFD_RESAMPLE 82
 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83
+#define KVM_CAP_PPC_HTAB_FD 84
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -859,6 +860,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_CREATE_SPAPR_TCE	  _IOW(KVMIO,  0xa8, struct kvm_create_spapr_tce)
 /* Available with KVM_CAP_RMA */
 #define KVM_ALLOCATE_RMA	  _IOR(KVMIO,  0xa9, struct kvm_allocate_rma)
+/* Available with KVM_CAP_PPC_HTAB_FD */
+#define KVM_PPC_GET_HTAB_FD	  _IOW(KVMIO,  0xaa, struct kvm_get_htab_fd)
 
 /*
  * ioctls for vcpu fds


@@ -35,6 +35,7 @@
 
 #include "iodev.h"
 
+#ifdef __KVM_HAVE_IOAPIC
 /*
  * --------------------------------------------------------------------
  * irqfd: Allows an fd to be used to inject an interrupt to the guest
 
@@ -425,17 +426,21 @@ fail:
 	kfree(irqfd);
 	return ret;
 }
+#endif
 
 void
 kvm_eventfd_init(struct kvm *kvm)
 {
+#ifdef __KVM_HAVE_IOAPIC
 	spin_lock_init(&kvm->irqfds.lock);
 	INIT_LIST_HEAD(&kvm->irqfds.items);
 	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
 	mutex_init(&kvm->irqfds.resampler_lock);
+#endif
 	INIT_LIST_HEAD(&kvm->ioeventfds);
 }
 
+#ifdef __KVM_HAVE_IOAPIC
 /*
  * shutdown any irqfd's that match fd+gsi
  */
 
@@ -555,6 +560,7 @@ static void __exit irqfd_module_exit(void)
 
 module_init(irqfd_module_init);
 module_exit(irqfd_module_exit);
+#endif
 
 /*
  * --------------------------------------------------------------------