KVM: count number of assigned devices
If there are no assigned devices, the guest PAT is not providing any useful information and can be overridden to writeback; VMX always does this because it has the "IPAT" bit in its extended page table entries, but SVM does not have anything similar. Hook into VFIO and legacy device assignment so that they provide this information to KVM.

Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Tested-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: 370777daab
Commit: 5544eb9b81
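As background for the change below, here is a minimal sketch (not part of this commit; the function name is hypothetical) of how an arch-side memory-type helper could use the new kvm_arch_has_assigned_device() counter: with no assigned device there is no device DMA that bypasses the CPU caches, so the guest PAT can be ignored and memory mapped writeback; once a device is assigned, the guest's choice has to be honored.

/*
 * Illustrative sketch only -- not part of this commit.  The function name
 * example_effective_memtype() is made up; kvm_arch_has_assigned_device()
 * is the helper introduced by the patch below.
 */
static u8 example_effective_memtype(struct kvm *kvm, u8 guest_pat_type)
{
        /* No assigned device: no DMA bypasses the caches, force writeback. */
        if (!kvm_arch_has_assigned_device(kvm))
                return 0x06;    /* PAT encoding for writeback (WB) */

        /* A device is assigned: honor the memory type the guest programmed. */
        return guest_pat_type;
}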
@@ -604,6 +604,8 @@ struct kvm_arch {
         bool iommu_noncoherent;
 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
         atomic_t noncoherent_dma_count;
+#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+        atomic_t assigned_device_count;
         struct kvm_pic *vpic;
         struct kvm_ioapic *vioapic;
         struct kvm_pit *vpit;

@@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
                 goto out_unmap;
         }
 
+        kvm_arch_start_assignment(kvm);
         pci_set_dev_assigned(pdev);
 
         dev_info(&pdev->dev, "kvm assign device\n");

@@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
         iommu_detach_device(domain, &pdev->dev);
 
         pci_clear_dev_assigned(pdev);
+        kvm_arch_end_assignment(kvm);
 
         dev_info(&pdev->dev, "kvm deassign device\n");
 

@@ -8213,6 +8213,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
                 kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
+void kvm_arch_start_assignment(struct kvm *kvm)
+{
+        atomic_inc(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
+
+void kvm_arch_end_assignment(struct kvm *kvm)
+{
+        atomic_dec(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
+
+bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+        return atomic_read(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
+
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
 {
         atomic_inc(&kvm->arch.noncoherent_dma_count);

@@ -734,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
         return false;
 }
 #endif
+#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+void kvm_arch_start_assignment(struct kvm *kvm);
+void kvm_arch_end_assignment(struct kvm *kvm);
+bool kvm_arch_has_assigned_device(struct kvm *kvm);
+#else
+static inline void kvm_arch_start_assignment(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_end_assignment(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+        return false;
+}
+#endif
 
 static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 {

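The stub block above lets architecture-independent code call these helpers unconditionally; on architectures that do not define __KVM_HAVE_ARCH_ASSIGNED_DEVICE they simply report that nothing is assigned. A small hypothetical caller, for illustration only (the function name is made up):

/*
 * Hypothetical common-code caller, for illustration only.  Both helpers it
 * uses are declared in this header; without the corresponding arch hooks,
 * the stub versions of both checks return false.
 */
static inline bool example_can_force_writeback(struct kvm *kvm)
{
        return !kvm_arch_has_assigned_device(kvm) &&
               !kvm_arch_has_noncoherent_dma(kvm);
}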
@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
                 list_add_tail(&kvg->node, &kv->group_list);
                 kvg->vfio_group = vfio_group;
 
+                kvm_arch_start_assignment(dev->kvm);
+
                 mutex_unlock(&kv->lock);
 
                 kvm_vfio_update_coherency(dev);

@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
                         break;
                 }
 
+                kvm_arch_end_assignment(dev->kvm);
+
                 mutex_unlock(&kv->lock);
 
                 kvm_vfio_group_put_external_user(vfio_group);

@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
                 kvm_vfio_group_put_external_user(kvg->vfio_group);
                 list_del(&kvg->node);
                 kfree(kvg);
+                kvm_arch_end_assignment(dev->kvm);
         }
 
         kvm_vfio_update_coherency(dev);