KVM: PPC: Book3S HV: Avoid lockdep debugging in TCE realmode handlers
The kvmppc_tce_to_ua() helper is called from both real and virtual
modes, and it works fine as long as CONFIG_DEBUG_LOCKDEP is not
enabled. However, if lockdep debugging is on, lockdep will most likely
break in kvm_memslots() because of srcu_dereference_check(), so we
need to use the PPC-specific kvm_memslots_raw(), which uses the
realmode-safe rcu_dereference_raw_notrace().
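
For reference, the two accessors differ roughly as follows (a
simplified sketch of the generic kvm_memslots() from
include/linux/kvm_host.h and the PPC kvm_memslots_raw() from
arch/powerpc/include/asm/kvm_book3s_64.h of that era, trimmed for
illustration rather than quoted verbatim):

    /* Generic accessor: srcu_dereference_check() pulls in lockdep
     * checks, which are not safe to run with the MMU off. */
    static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
    {
            return srcu_dereference_check(kvm->memslots[0], &kvm->srcu,
                            lockdep_is_held(&kvm->slots_lock));
    }

    /* PPC-specific accessor: a raw dereference with no lockdep checks
     * and no RCU tracing, hence safe in realmode. */
    static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
    {
            return rcu_dereference_raw_notrace(kvm->memslots[0]);
    }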
This creates a realmode copy of kvmppc_tce_to_ua(), called
kvmppc_rm_tce_to_ua(), which replaces kvm_memslots() with
kvm_memslots_raw(). Since kvmppc_rm_tce_to_ua() is static and can only
be used inside HV KVM, this moves it earlier, under
CONFIG_KVM_BOOK3S_HV_POSSIBLE. This also moves the now purely
virtual-mode kvmppc_tce_to_ua() to where it belongs and drops the
prmap parameter, which was never used in virtual mode.
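
In short, the patch leaves two variants with the following shapes
(prototypes only; the full bodies are in the diff below):

    /* Virtual mode only: uses kvm_memslots(), no prmap parameter. */
    static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
                    unsigned long *ua);

    /* Real mode only: uses kvm_memslots_raw() and can also return a
     * pointer into the memslot's rmap array via the optional prmap. */
    static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
                    unsigned long *ua, unsigned long **prmap);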
Fixes: d3695aa4f4 ("KVM: PPC: Add support for multiple-TCE hcalls", 2016-02-15)
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Parent: 3309bec85e
Commit: 2001825efc
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -197,8 +197,6 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
 		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
 				(stt)->size, (ioba), (npages)) ?        \
 				H_PARAMETER : H_SUCCESS)
-extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
-		unsigned long *ua, unsigned long **prmap);
 extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
 		unsigned long idx, unsigned long tce);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -363,6 +363,22 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 	return ret;
 }
 
+static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
+		unsigned long *ua)
+{
+	unsigned long gfn = tce >> PAGE_SHIFT;
+	struct kvm_memory_slot *memslot;
+
+	memslot = search_memslots(kvm_memslots(kvm), gfn);
+	if (!memslot)
+		return -EINVAL;
+
+	*ua = __gfn_to_hva_memslot(memslot, gfn) |
+		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+
+	return 0;
+}
+
 static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
 		unsigned long tce)
 {
@@ -378,7 +394,7 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
 	if (iommu_tce_check_gpa(stt->page_shift, gpa))
 		return H_TOO_HARD;
 
-	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
 		return H_TOO_HARD;
 
 	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
@@ -551,7 +567,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 
 	dir = iommu_tce_direction(tce);
 
-	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
 		ret = H_PARAMETER;
 		goto unlock_exit;
 	}
@@ -612,7 +628,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		return ret;
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
+	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
 		ret = H_TOO_HARD;
 		goto unlock_exit;
 	}
@@ -647,7 +663,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	}
 	tce = be64_to_cpu(tce);
 
-	if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+	if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua))
 		return H_PARAMETER;
 
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -88,6 +88,25 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
 EXPORT_SYMBOL_GPL(kvmppc_find_table);
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
+		unsigned long *ua, unsigned long **prmap)
+{
+	unsigned long gfn = tce >> PAGE_SHIFT;
+	struct kvm_memory_slot *memslot;
+
+	memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
+	if (!memslot)
+		return -EINVAL;
+
+	*ua = __gfn_to_hva_memslot(memslot, gfn) |
+		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+
+	if (prmap)
+		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+
+	return 0;
+}
+#endif
+
 /*
  * Validates TCE address.
  * At the moment flags and page mask are validated.
@@ -111,7 +130,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
 	if (iommu_tce_check_gpa(stt->page_shift, gpa))
 		return H_PARAMETER;
 
-	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
 		return H_TOO_HARD;
 
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -181,28 +200,6 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
 }
 EXPORT_SYMBOL_GPL(kvmppc_tce_put);
 
-long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
-		unsigned long *ua, unsigned long **prmap)
-{
-	unsigned long gfn = tce >> PAGE_SHIFT;
-	struct kvm_memory_slot *memslot;
-
-	memslot = search_memslots(kvm_memslots(kvm), gfn);
-	if (!memslot)
-		return -EINVAL;
-
-	*ua = __gfn_to_hva_memslot(memslot, gfn) |
-		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
-
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-	if (prmap)
-		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
-#endif
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
-
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
 		unsigned long entry, unsigned long *hpa,
@@ -390,7 +387,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		return ret;
 
 	dir = iommu_tce_direction(tce);
-	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
 		return H_PARAMETER;
 
 	entry = ioba >> stt->page_shift;
@@ -492,7 +489,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	 */
 	struct mm_iommu_table_group_mem_t *mem;
 
-	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+	if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
 		return H_TOO_HARD;
 
 	mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -508,7 +505,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	 * We do not require memory to be preregistered in this case
 	 * so lock rmap and do __find_linux_pte_or_hugepte().
 	 */
-	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+	if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
 		return H_TOO_HARD;
 
 	rmap = (void *) vmalloc_to_phys(rmap);
@@ -542,7 +539,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
 
 		ua = 0;
-		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
 			return H_PARAMETER;
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {