KVM: x86: SVM: fix avic_kick_target_vcpus_fast

There are two issues in avic_kick_target_vcpus_fast:

1. It is legal to issue an IPI request with APIC_DEST_NOSHORT
   and a physical destination of 0xFF (or 0xFFFFFFFF in the case of
   x2APIC), which must be treated as a broadcast destination.

   Fix this by explicitly checking for a broadcast destination.
   Also don't use 'index' in this case, as it provides no new information.

2. It is legal to issue a logical IPI request to more than one target.
   The 'index' field only provides the index into the physical ID table
   of the first such target, and therefore can't be used before we are
   sure that only a single target was addressed.

   Instead, parse the ICRL/ICRH, double-check that a unicast interrupt
   was requested, and use that information to figure out the physical ID
   of the target vCPU. At that point there is no need to use the index
   field at all; both checks are illustrated by the sketch right after
   this list.
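
Both checks can be illustrated with a minimal, self-contained user-space
sketch for the xAPIC case. decode_dest(), FAST_PATH_* and XAPIC_BROADCAST
are hypothetical names, not KVM code; the cluster/bitmap split is modelled
on the logic the patch below adds, and this is only a sketch of the idea,
not the kernel implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XAPIC_BROADCAST	0xFFu

static bool is_power_of_2(uint32_t x)
{
	return x && !(x & (x - 1));
}

enum fast_path_result {
	FAST_PATH_OK,		/* exactly one target, *index is valid */
	FAST_PATH_SLOW,		/* broadcast or multicast: use the slow path */
	FAST_PATH_NO_TARGET,	/* guest bug: empty destination, nothing to do */
};

/*
 * dest      - 8-bit xAPIC destination taken from ICRH
 * logical   - true for logical destination mode
 * flat_mode - true if APIC_DFR selects flat (rather than cluster) addressing
 * *index    - on FAST_PATH_OK: physical APIC ID (physical mode) or
 *             logical ID table index (logical mode)
 */
static enum fast_path_result decode_dest(uint32_t dest, bool logical,
					 bool flat_mode, uint32_t *index)
{
	uint32_t bitmap, cluster;

	if (!logical) {
		/* issue 1: 0xFF is a broadcast, not physical APIC ID 255 */
		if (dest == XAPIC_BROADCAST)
			return FAST_PATH_SLOW;
		*index = dest;
		return FAST_PATH_OK;
	}

	if (flat_mode) {
		bitmap = dest & 0xFF;		/* 8-bit destination bitmap */
		cluster = 0;
	} else {
		bitmap = dest & 0xF;		/* 4-bit destination bitmap ... */
		cluster = (dest >> 4) << 2;	/* ... plus a 4-bit cluster id */
	}

	if (!bitmap)
		return FAST_PATH_NO_TARGET;
	if (!is_power_of_2(bitmap))
		return FAST_PATH_SLOW;		/* issue 2: more than one target */

	*index = cluster + (uint32_t)__builtin_ctz(bitmap);
	return FAST_PATH_OK;
}

int main(void)
{
	uint32_t idx;

	/* physical destination 0xFF must fall back to the slow path */
	printf("physical 0xFF -> slow path: %d\n",
	       decode_dest(0xFF, false, true, &idx) == FAST_PATH_SLOW);

	/* cluster mode, cluster 2, second CPU of the cluster -> index 9 */
	if (decode_dest(0x22, true, false, &idx) == FAST_PATH_OK)
		printf("logical 0x22 -> logical id table index %u\n", idx);

	/* two destination bits set -> must fall back to the slow path */
	printf("logical 0x03 -> slow path: %d\n",
	       decode_dest(0x03, true, true, &idx) == FAST_PATH_SLOW);

	return 0;
}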

In addition to fixing the above issues, also skip the call to
kvm_apic_match_dest().

This is now possible because, as long as AVIC is not inhibited, it is
guaranteed that none of the vCPUs changed their APIC ID from its
default value.
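
A minimal user-space sketch of the invariant this relies on (toy_vcpu,
lookup_by_id() and lookup_by_match() are illustrative names, not KVM
APIs): while every APIC ID still equals its default value, i.e. the vCPU
ID, a direct lookup by destination returns the same vCPU as scanning and
matching every APIC ID, which is what the kvm_apic_match_dest() based
path effectively did.

#include <stdint.h>
#include <stdio.h>

#define NR_VCPUS 4

/* Toy stand-in for a vCPU; not a KVM type. */
struct toy_vcpu {
	uint32_t apic_id;	/* guest-visible APIC ID, default == index */
};

/* Direct lookup by destination, as the fast path now does. */
static struct toy_vcpu *lookup_by_id(struct toy_vcpu *vcpus, uint32_t dest)
{
	return dest < NR_VCPUS ? &vcpus[dest] : NULL;
}

/* Scan-and-match, the equivalent of the kvm_apic_match_dest() approach. */
static struct toy_vcpu *lookup_by_match(struct toy_vcpu *vcpus, uint32_t dest)
{
	for (uint32_t i = 0; i < NR_VCPUS; i++)
		if (vcpus[i].apic_id == dest)
			return &vcpus[i];
	return NULL;
}

int main(void)
{
	struct toy_vcpu vcpus[NR_VCPUS];

	/* Default APIC IDs: apic_id == vCPU index, as guaranteed while
	 * AVIC is not inhibited. */
	for (uint32_t i = 0; i < NR_VCPUS; i++)
		vcpus[i].apic_id = i;

	/* The two lookups agree, so the explicit match can be skipped. */
	printf("%s\n", lookup_by_id(vcpus, 2) == lookup_by_match(vcpus, 2) ?
	       "same target" : "mismatch");
	return 0;
}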

This fixes booting of a Windows guest with AVIC enabled, because
Windows uses an IPI with a 0xFF destination and no destination
shorthand.

Fixes: 7223fd2d53 ("KVM: SVM: Use target APIC ID to complete AVIC IRQs when possible")
Cc: stable@vger.kernel.org

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220606180829.102503-5-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Maxim Levitsky 2022-06-06 21:08:26 +03:00, committed by Paolo Bonzini
Parent: f5f9089f76
Commit: 603ccef42c
1 changed file with 72 additions and 39 deletions

@@ -291,58 +291,91 @@ void avic_ring_doorbell(struct kvm_vcpu *vcpu)
 static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source,
 				       u32 icrl, u32 icrh, u32 index)
 {
-	u32 dest, apic_id;
-	struct kvm_vcpu *vcpu;
+	u32 l1_physical_id, dest;
+	struct kvm_vcpu *target_vcpu;
 	int dest_mode = icrl & APIC_DEST_MASK;
 	int shorthand = icrl & APIC_SHORT_MASK;
 	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
-	u32 *avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page);
 
 	if (shorthand != APIC_DEST_NOSHORT)
 		return -EINVAL;
 
-	/*
-	 * The AVIC incomplete IPI #vmexit info provides index into
-	 * the physical APIC ID table, which can be used to derive
-	 * guest physical APIC ID.
-	 */
-	if (dest_mode == APIC_DEST_PHYSICAL) {
-		apic_id = index;
-	} else {
-		if (!apic_x2apic_mode(source)) {
-			/* For xAPIC logical mode, the index is for logical APIC table. */
-			apic_id = avic_logical_id_table[index] & 0x1ff;
-		} else {
-			return -EINVAL;
-		}
-	}
-
-	/*
-	 * Assuming vcpu ID is the same as physical apic ID,
-	 * and use it to retrieve the target vCPU.
-	 */
-	vcpu = kvm_get_vcpu_by_id(kvm, apic_id);
-	if (!vcpu)
-		return -EINVAL;
-
-	if (apic_x2apic_mode(vcpu->arch.apic))
+	if (apic_x2apic_mode(source))
 		dest = icrh;
 	else
 		dest = GET_APIC_DEST_FIELD(icrh);
 
-	/*
-	 * Try matching the destination APIC ID with the vCPU.
-	 */
-	if (kvm_apic_match_dest(vcpu, source, shorthand, dest, dest_mode)) {
-		vcpu->arch.apic->irr_pending = true;
-		svm_complete_interrupt_delivery(vcpu,
-						icrl & APIC_MODE_MASK,
-						icrl & APIC_INT_LEVELTRIG,
-						icrl & APIC_VECTOR_MASK);
-		return 0;
+	if (dest_mode == APIC_DEST_PHYSICAL) {
+		/* broadcast destination, use slow path */
+		if (apic_x2apic_mode(source) && dest == X2APIC_BROADCAST)
+			return -EINVAL;
+		if (!apic_x2apic_mode(source) && dest == APIC_BROADCAST)
+			return -EINVAL;
+
+		l1_physical_id = dest;
+
+		if (WARN_ON_ONCE(l1_physical_id != index))
+			return -EINVAL;
+
+	} else {
+		u32 bitmap, cluster;
+		int logid_index;
+
+		if (apic_x2apic_mode(source)) {
+			/* 16 bit dest mask, 16 bit cluster id */
+			bitmap = dest & 0xFFFF0000;
+			cluster = (dest >> 16) << 4;
+		} else if (kvm_lapic_get_reg(source, APIC_DFR) == APIC_DFR_FLAT) {
+			/* 8 bit dest mask*/
+			bitmap = dest;
+			cluster = 0;
+		} else {
+			/* 4 bit desk mask, 4 bit cluster id */
+			bitmap = dest & 0xF;
+			cluster = (dest >> 4) << 2;
+		}
+
+		if (unlikely(!bitmap))
+			/* guest bug: nobody to send the logical interrupt to */
+			return 0;
+
+		if (!is_power_of_2(bitmap))
+			/* multiple logical destinations, use slow path */
+			return -EINVAL;
+
+		logid_index = cluster + __ffs(bitmap);
+
+		if (apic_x2apic_mode(source)) {
+			l1_physical_id = logid_index;
+		} else {
+			u32 *avic_logical_id_table =
+				page_address(kvm_svm->avic_logical_id_table_page);
+
+			u32 logid_entry = avic_logical_id_table[logid_index];
+
+			if (WARN_ON_ONCE(index != logid_index))
+				return -EINVAL;
+
+			/* guest bug: non existing/reserved logical destination */
+			if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK)))
+				return 0;
+
+			l1_physical_id = logid_entry &
+					 AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
+		}
 	}
 
-	return -EINVAL;
+	target_vcpu = kvm_get_vcpu_by_id(kvm, l1_physical_id);
+	if (unlikely(!target_vcpu))
+		/* guest bug: non existing vCPU is a target of this IPI*/
+		return 0;
+
+	target_vcpu->arch.apic->irr_pending = true;
+	svm_complete_interrupt_delivery(target_vcpu,
+					icrl & APIC_MODE_MASK,
+					icrl & APIC_INT_LEVELTRIG,
+					icrl & APIC_VECTOR_MASK);
+	return 0;
 }
 
 static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,