KVM: PPC: Book3S HV: Use hypercalls for TLB invalidation when nested
This adds code to call the H_TLB_INVALIDATE hypercall when running as a guest, in the cases where we need to invalidate TLBs (or other MMU caches) as part of managing the mappings for a nested guest. Calling H_TLB_INVALIDATE lets the nested hypervisor inform the parent hypervisor about changes to partition-scoped page tables or the partition table without needing to do hypervisor-privileged tlbie instructions. Reviewed-by: David Gibson <david@gibson.dropbear.id.au> Signed-off-by: Paul Mackerras <paulus@ozlabs.org> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:
Parent
e3b6b46615
Commit
690ed4cad8
|
@ -24,6 +24,7 @@
|
|||
#include <asm/bitops.h>
|
||||
#include <asm/book3s/64/mmu-hash.h>
|
||||
#include <asm/cpu_has_feature.h>
|
||||
#include <asm/ppc-opcode.h>
|
||||
|
||||
#ifdef CONFIG_PPC_PSERIES
|
||||
static inline bool kvmhv_on_pseries(void)
|
||||
|
@ -117,6 +118,10 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
|
|||
bool create);
|
||||
void kvmhv_put_nested(struct kvm_nested_guest *gp);
|
||||
|
||||
/* Encoding of first parameter for H_TLB_INVALIDATE */
|
||||
#define H_TLBIE_P1_ENC(ric, prs, r) (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
|
||||
___PPC_R(r))
|
||||
|
||||
/* Power architecture requires HPT is at least 256kiB, at most 64TiB */
|
||||
#define PPC_MIN_HPT_ORDER 18
|
||||
#define PPC_MAX_HPT_ORDER 46
|
||||
|
|
|
@ -201,17 +201,43 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
|
|||
unsigned int pshift, unsigned int lpid)
|
||||
{
|
||||
unsigned long psize = PAGE_SIZE;
|
||||
int psi;
|
||||
long rc;
|
||||
unsigned long rb;
|
||||
|
||||
if (pshift)
|
||||
psize = 1UL << pshift;
|
||||
else
|
||||
pshift = PAGE_SHIFT;
|
||||
|
||||
addr &= ~(psize - 1);
|
||||
radix__flush_tlb_lpid_page(lpid, addr, psize);
|
||||
|
||||
if (!kvmhv_on_pseries()) {
|
||||
radix__flush_tlb_lpid_page(lpid, addr, psize);
|
||||
return;
|
||||
}
|
||||
|
||||
psi = shift_to_mmu_psize(pshift);
|
||||
rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
|
||||
rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
|
||||
lpid, rb);
|
||||
if (rc)
|
||||
pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
|
||||
}
|
||||
|
||||
static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
|
||||
{
|
||||
radix__flush_pwc_lpid(lpid);
|
||||
long rc;
|
||||
|
||||
if (!kvmhv_on_pseries()) {
|
||||
radix__flush_pwc_lpid(lpid);
|
||||
return;
|
||||
}
|
||||
|
||||
rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
|
||||
lpid, TLBIEL_INVAL_SET_LPID);
|
||||
if (rc)
|
||||
pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
|
||||
}
|
||||
|
||||
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
|
||||
|
|
|
@ -299,14 +299,32 @@ void kvmhv_nested_exit(void)
|
|||
}
|
||||
}
|
||||
|
||||
static void kvmhv_flush_lpid(unsigned int lpid)
|
||||
{
|
||||
long rc;
|
||||
|
||||
if (!kvmhv_on_pseries()) {
|
||||
radix__flush_tlb_lpid(lpid);
|
||||
return;
|
||||
}
|
||||
|
||||
rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
|
||||
lpid, TLBIEL_INVAL_SET_LPID);
|
||||
if (rc)
|
||||
pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
|
||||
}
|
||||
|
||||
/*
 * Set partition-table entry @lpid to (@dw0, @dw1).
 *
 * On bare metal we own the partition table and can write it (with the
 * required barriers) via mmu_partition_table_set_entry().  As a nested
 * hypervisor we instead update our shadow table registered with L0 and
 * tell L0 to invalidate its cached translations for the LPID; L0 is
 * responsible for any ordering barriers.
 *
 * @lpid: logical partition ID being (re)configured
 * @dw0:  first doubleword of the entry (radix tree base/size)
 * @dw1:  second doubleword of the entry (process table pointer)
 */
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
	if (!kvmhv_on_pseries()) {
		mmu_partition_table_set_entry(lpid, dw0, dw1);
		return;
	}
	pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
	/* L0 will do the necessary barriers */
	kvmhv_flush_lpid(lpid);
}
|
||||
|
||||
static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
|
||||
|
@ -493,7 +511,7 @@ static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
|
|||
spin_lock(&kvm->mmu_lock);
|
||||
kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
radix__flush_tlb_lpid(gp->shadow_lpid);
|
||||
kvmhv_flush_lpid(gp->shadow_lpid);
|
||||
kvmhv_update_ptbl_cache(gp);
|
||||
if (gp->l1_gr_to_hr == 0)
|
||||
kvmhv_remove_nested(gp);
|
||||
|
@ -777,7 +795,7 @@ static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
|
|||
spin_lock(&kvm->mmu_lock);
|
||||
kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
|
||||
gp->shadow_lpid);
|
||||
radix__flush_tlb_lpid(gp->shadow_lpid);
|
||||
kvmhv_flush_lpid(gp->shadow_lpid);
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
break;
|
||||
case 1:
|
||||
|
|
Loading…
Link in new issue