The PPC folks had a large amount of changes queued for 3.13, and now they
are fixing the bugs.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQIcBAABAgAGBQJStImwAAoJEBvWZb6bTYbyvR0P/2tH/IuHe7xDaXyWy3JVlmzF
CmdnOLTPSlQjpLv7BRQ0K5TAU6DZWisRnXGUp1e8+Do4Ho9OuZzJugCr1Lt/4kTA
kZT2xWP5U4AbLTjoxlVckybk4Ci0oP+iZGqV8d95NurEb1oR1halAZ+7BTqujwch
jGSd3gk6mVN4np09Bj06P0nddttJubIki1VeZyQUFILqAIkzWv4qyL/awibYCFQA
+jHEcND8b5D9bkMniMojXaR0BGIdMZOKWGvKUdxbth+FbZgPqzOLwXoCVM5EmuuH
9aIee65y34+WXT4EHIou5Q4HyDxuKpciv1A7UhwLxEcfgUklvHOV/nZeQAKFIBIt
uabgHO/Psj6i9qSCuAJX8xYgB+BmktE8d+/r1XmIgQ/gPYRumOl5BVJo6OOIaGrF
M6cgccPD1dnMzFt4ccxoM1OhJivh30XfHAKKco7i8DhwcHh1cYcYlDqPEOy3wBA5
i4n99N/5gCSIB87y1EjvDw1CMiJ5PzuialvscH/a4knL9JFuukKS6O+C2z5LULKN
TixvTZMZWuHdNWezahcjSpbDeqWPBdB8RIEbGi2xBAHU2hsuxV2acjhdQ0vVgP48
qo8lLiXv4W030y9H+iflg5R6b3tJ5dmNKZN1fYiwhs4ijgL3wOu8iWia57sQFdyD
Nb+X/MeeD+tD5JYVyqvr
=k+i/
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "The PPC folks had a large amount of changes queued for 3.13, and now
  they are fixing the bugs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: Book3S HV: Don't drop low-order page address bits
  powerpc: book3s: kvm: Don't abuse host r2 in exit path
  powerpc/kvm/booke: Fix build break due to stack frame size warning
  KVM: PPC: Book3S: PR: Enable interrupts earlier
  KVM: PPC: Book3S: PR: Make svcpu -> vcpu store preempt savvy
  KVM: PPC: Book3S: PR: Export kvmppc_copy_to|from_svcpu
  KVM: PPC: Book3S: PR: Don't clobber our exit handler id
  powerpc: kvm: fix rare but potential deadlock scene
  KVM: PPC: Book3S HV: Take SRCU read lock around kvm_read_guest() call
  KVM: PPC: Book3S HV: Make tbacct_lock irq-safe
  KVM: PPC: Book3S HV: Refine barriers in guest entry/exit
  KVM: PPC: Book3S HV: Fix physical address calculations
Commit 46dd0835ca
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+				 struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+				   struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {

@@ -79,6 +79,7 @@ struct kvmppc_host_state {
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch1;
+	ulong scratch2;
 	u8 in_guest;
 	u8 restore_hid5;
 	u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;

@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);

@@ -576,6 +576,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);

@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 }
 
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
 	/*
 	 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 	 */
 	mtmsr(mfmsr() & ~MSR_DE);
 
-	mtspr(SPRN_IAC1, thread->debug.iac1);
-	mtspr(SPRN_IAC2, thread->debug.iac2);
+	mtspr(SPRN_IAC1, debug->iac1);
+	mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	mtspr(SPRN_IAC3, thread->debug.iac3);
-	mtspr(SPRN_IAC4, thread->debug.iac4);
+	mtspr(SPRN_IAC3, debug->iac3);
+	mtspr(SPRN_IAC4, debug->iac4);
 #endif
-	mtspr(SPRN_DAC1, thread->debug.dac1);
-	mtspr(SPRN_DAC2, thread->debug.dac2);
+	mtspr(SPRN_DAC1, debug->dac1);
+	mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	mtspr(SPRN_DVC1, thread->debug.dvc1);
-	mtspr(SPRN_DVC2, thread->debug.dvc2);
+	mtspr(SPRN_DVC1, debug->dvc1);
+	mtspr(SPRN_DVC2, debug->dvc2);
 #endif
-	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+	mtspr(SPRN_DBCR0, debug->dbcr0);
+	mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
-	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+	mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 }
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
-			prime_debug_regs(new_thread);
+		|| (new_debug->dbcr0 & DBCR0_IDM))
+			prime_debug_regs(new_debug);
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	switch_booke_debug_regs(&new->thread);
+	switch_booke_debug_regs(&new->thread.debug);
 #else
 /*
  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		slb_v = vcpu->kvm->arch.vrma_slb_v;
 	}
 
+	preempt_disable();
 	/* Find the HPTE in the hash table */
 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 					 HPTE_V_VALID | HPTE_V_ABSENT);
-	if (index < 0)
+	if (index < 0) {
+		preempt_enable();
 		return -ENOENT;
+	}
 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 	v = hptep[0] & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Unlock the HPTE */
 	asm volatile("lwsync" : : : "memory");
 	hptep[0] = v;
+	preempt_enable();
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			return -EFAULT;
 	} else {
 		page = pages[0];
+		pfn = page_to_pfn(page);
 		if (PageHuge(page)) {
 			page = compound_head(page);
 			pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			}
 			rcu_read_unlock_sched();
 		}
-		pfn = page_to_pfn(page);
 	}
 
 	ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 	}
 
-	/* Set the HPTE to point to pfn */
-	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+	/*
+	 * Set the HPTE to point to pfn.
+	 * Since the pfn is at PAGE_SIZE granularity, make sure we
+	 * don't mask out lower-order bits if psize < PAGE_SIZE.
+	 */
+	if (psize < PAGE_SIZE)
+		psize = PAGE_SIZE;
+	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
 	if (hpte_is_writable(r) && !write_ok)
 		r = hpte_make_readonly(r);
 	ret = RESUME_GUEST;

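The reworked mask in the hunk above is easier to follow with concrete numbers. Below is a small standalone sketch of the arithmetic (the constants and variable names are hypothetical, not the real HPTE layout): because the pfn only identifies the backing page at host PAGE_SIZE granularity, psize is clamped up to PAGE_SIZE so the real-address bits below the host page size are taken from the guest-supplied value rather than zeroed.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: why low-order real-address bits must survive when the
 * guest page size is smaller than the host page size. */
int main(void)
{
	uint64_t host_page_shift = 16;		/* 64K host pages */
	uint64_t psize = 4096;			/* 4K guest page size */
	uint64_t pfn = 0x12345;			/* host page frame number */
	uint64_t guest_low_bits = 0x7000;	/* guest RA bits below 64K */
	uint64_t pa;

	/* pfn can only resolve the address down to PAGE_SIZE ... */
	if (psize < (1ULL << host_page_shift))
		psize = 1ULL << host_page_shift;

	/* ... so bits below that come from the guest entry, not the pfn */
	pa = (guest_low_bits & (psize - 1)) |
	     ((pfn << host_page_shift) & ~(psize - 1));

	printf("pa = 0x%llx\n", (unsigned long long)pa);	/* 0x123457000 */
	return 0;
}
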
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 	    vc->preempt_tb != TB_NIL) {
 		vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 		vcpu->arch.busy_preempt = TB_NIL;
 	}
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 		vc->preempt_tb = mftb();
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 		vcpu->arch.busy_preempt = mftb();
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 	 */
 	if (vc->vcore_state != VCORE_INACTIVE &&
 	    vc->runner->arch.run_task != current) {
-		spin_lock(&vc->runner->arch.tbacct_lock);
+		spin_lock_irq(&vc->runner->arch.tbacct_lock);
 		p = vc->stolen_tb;
 		if (vc->preempt_tb != TB_NIL)
 			p += now - vc->preempt_tb;
-		spin_unlock(&vc->runner->arch.tbacct_lock);
+		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
 	} else {
 		p = vc->stolen_tb;
 	}
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	core_stolen = vcore_stolen_time(vc, now);
 	stolen = core_stolen - vcpu->arch.stolen_logged;
 	vcpu->arch.stolen_logged = core_stolen;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	stolen += vcpu->arch.busy_stolen;
 	vcpu->arch.busy_stolen = 0;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	if (!dt || !vpa)
 		return;
 	memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
 			return RESUME_HOST;
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		rc = kvmppc_rtas_hcall(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 		if (rc == -ENOENT)
 			return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 
 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 		return;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	now = mftb();
 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 		vcpu->arch.stolen_logged;
 	vcpu->arch.busy_preempt = now;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	--vc->n_runnable;
 	list_del(&vcpu->arch.run_list);
 }

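The tbacct_lock changes in the hunks above all apply one pattern: once a lock may be taken from a path that runs with interrupts disabled, every other acquirer has to use the irq-disabling lock variants so it can never be interrupted (and re-entered) while holding the lock. A minimal kernel-style sketch of that pattern follows; the structure and field names are illustrative, not the actual kvmppc ones.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical accounting structure standing in for the vcpu/vcore fields. */
struct tb_acct {
	spinlock_t lock;
	u64 stolen;
};

/* May be reached from contexts where the interrupt state is unknown:
 * save and restore the interrupt flags around the critical section. */
static void tb_acct_add(struct tb_acct *acct, u64 delta)
{
	unsigned long flags;

	spin_lock_irqsave(&acct->lock, flags);
	acct->stolen += delta;
	spin_unlock_irqrestore(&acct->lock, flags);
}

/* Only called with interrupts enabled, so the plain _irq variant is enough. */
static u64 tb_acct_read(struct tb_acct *acct)
{
	u64 val;

	spin_lock_irq(&acct->lock);
	val = acct->stolen;
	spin_unlock_irq(&acct->lock);
	return val;
}
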
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		is_io = pa & (HPTE_R_I | HPTE_R_W);
 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 		pa &= PAGE_MASK;
+		pa |= gpa & ~PAGE_MASK;
 	} else {
 		/* Translate to host virtual address */
 		hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 				ptel = hpte_make_readonly(ptel);
 			is_io = hpte_cache_bits(pte_val(pte));
 			pa = pte_pfn(pte) << PAGE_SHIFT;
+			pa |= hva & (pte_size - 1);
+			pa |= gpa & ~PAGE_MASK;
 		}
 	}
 
 	if (pte_size < psize)
 		return H_PARAMETER;
-	if (pa && pte_size > psize)
-		pa |= gpa & (pte_size - 1);
 
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
 	20,	/* 1M, unsupported */
 };
 
+/* When called from virtmode, this func should be protected by
+ * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
+ * can trigger deadlock issue.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			      unsigned long valid)
 {

@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 13:	b	machine_check_fwnmi
 
-
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
 	/* Clear our vcpu pointer so we don't come back in early */
 	li	r0, 0
 	std	r0, HSTATE_KVM_VCPU(r13)
+	/*
+	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+	 * the nap_count, because once the increment to nap_count is
+	 * visible we could be given another vcpu.
+	 */
 	lwsync
 	/* Clear any pending IPI - we're an offline thread */
 	ld	r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
 	addi	r4, r4, VCORE_NAP_COUNT
-	lwsync	/* make previous updates visible */
51:	lwarx	r3, 0, r4
 	addi	r3, r3, 1
 	stwcx.	r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 	 * guest R13 saved in SPRN_SCRATCH0
 	 */
-	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-	std	r9, HSTATE_HOST_R2(r13)
+	std	r9, HSTATE_SCRATCH2(r13)
 
 	lbz	r9, HSTATE_IN_GUEST(r13)
 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
 	beq	kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	cmpwi	r9, KVM_GUEST_MODE_GUEST
-	ld	r9, HSTATE_HOST_R2(r13)
+	ld	r9, HSTATE_SCRATCH2(r13)
 	beq	kvmppc_interrupt_pr
 #endif
 	/* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
 	std	r6, VCPU_GPR(R6)(r9)
 	std	r7, VCPU_GPR(R7)(r9)
 	std	r8, VCPU_GPR(R8)(r9)
-	ld	r0, HSTATE_HOST_R2(r13)
+	ld	r0, HSTATE_SCRATCH2(r13)
 	std	r0, VCPU_GPR(R9)(r9)
 	std	r10, VCPU_GPR(R10)(r9)
 	std	r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 */
 	/* Increment the threads-exiting-guest count in the 0xff00
 	   bits of vcore->entry_exit_count */
-	lwsync
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
 	addi	r0,r3,0x100
 	stwcx.	r0,0,r6
 	bne	41b
-	lwsync
+	isync	/* order stwcx. vs. reading napping_threads */
 
 	/*
 	 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	sld	r0,r0,r4
 	andc.	r3,r3,r0	/* no sense IPI'ing ourselves */
 	beq	43f
+	/* Order entry/exit update vs. IPIs */
+	sync
 	mulli	r4,r4,PACA_SIZE	/* get paca for thread 0 */
 	subf	r6,r4,r13
42:	andi.	r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	bge	kvm_cede_exit
 	stwcx.	r4,0,r6
 	bne	31b
+	/* order napping_threads update vs testing entry_exit_count */
+	isync
 	li	r0,1
 	stb	r0,HSTATE_NAPPING(r13)
-	/* order napping_threads update vs testing entry_exit_count */
-	lwsync
 	mr	r4,r3
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r7,0x100

@@ -129,29 +129,32 @@ kvm_start_lightweight:
 	 * R12 = exit handler id
 	 * R13 = PACA
 	 * SVCPU.* = guest *
+	 * MSR.EE = 1
 	 *
 	 */
 
+	PPC_LL	r3, GPR4(r1)	/* vcpu pointer */
+
+	/*
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
+	 * the exit handler id to the vcpu and restore it from there later.
+	 */
+	stw	r12, VCPU_TRAP(r3)
+
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
-	/* On 64-bit, interrupts are still off at this point */
-	PPC_LL	r3, GPR4(r1)	/* vcpu pointer */
+
 	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* Re-enable interrupts */
-	ld	r3, HSTATE_HOST_MSR(r13)
-	ori	r3, r3, MSR_EE
-	MTMSR_EERI(r3)
-
 	/*
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
 	 */
 	ld	r3, PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3, r3
 
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 	/* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r12
+	lwz	r5, VCPU_TRAP(r7)
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)

@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->in_use = 0;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	if (svcpu->in_use) {
+		kvmppc_copy_from_svcpu(vcpu, svcpu);
+	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->lr = vcpu->arch.lr;
 	svcpu->pc = vcpu->arch.pc;
+	svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+	/*
+	 * vcpu_put would just call us again because in_use hasn't
+	 * been updated yet.
+	 */
+	preempt_disable();
+
+	/*
+	 * Maybe we were already preempted and synced the svcpu from
+	 * our preempt notifiers. Don't bother touching this svcpu then.
+	 */
+	if (!svcpu->in_use)
+		goto out;
+
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	vcpu->arch.fault_dar = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.last_inst = svcpu->last_inst;
+	svcpu->in_use = false;
+
+out:
+	preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)

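The in_use flag added above is the usual guard for a staging area that a preempt notifier may already have flushed behind the current thread's back: disable preemption, check the flag, copy the data back, then clear the flag. A kernel-style sketch of that shape, with illustrative types rather than the real shadow-vcpu layout:

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/types.h>

/* Hypothetical staging area standing in for the shadow vcpu. */
struct staging_area {
	bool in_use;
	u64 regs[14];
};

static void staging_copy_back(u64 *dst, struct staging_area *s)
{
	int i;

	/* Keep a preempt notifier from flushing s underneath us. */
	preempt_disable();

	/* Someone may already have copied it back; nothing to do then. */
	if (!s->in_use)
		goto out;

	for (i = 0; i < ARRAY_SIZE(s->regs); i++)
		dst[i] = s->regs[i];
	s->in_use = false;
out:
	preempt_enable();
}
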
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function. On 64-bit we delay enabling
-	 * interrupts until we have finished transferring stuff
-	 * to or from the PACA.
+	 * C exit handler function.
 	 */
 	ori	r5, r5, MSR_EE
-#endif
 	mtsrr0	r7
 	mtsrr1	r6
 	RFI

@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret, s;
-	struct thread_struct thread;
+	struct debug_reg debug;
 #ifdef CONFIG_PPC_FPU
 	struct thread_fp_state fp;
 	int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 	/* Switch to guest debug context */
-	thread.debug = vcpu->arch.shadow_dbg_reg;
-	switch_booke_debug_regs(&thread);
-	thread.debug = current->thread.debug;
+	debug = vcpu->arch.shadow_dbg_reg;
+	switch_booke_debug_regs(&debug);
+	debug = current->thread.debug;
 	current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
 	kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	   We also get here with interrupts enabled. */
 
 	/* Switch back to user space debug context */
-	switch_booke_debug_regs(&thread);
-	current->thread.debug = thread.debug;
+	switch_booke_debug_regs(&debug);
+	current->thread.debug = debug;
 
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);