KVM: x86: Query vcpu->vcpu_idx directly and drop its accessor

Read vcpu->vcpu_idx directly instead of bouncing through the one-line
wrapper, kvm_vcpu_get_idx(), and drop the wrapper.  The wrapper is a
remnant of the original implementation and serves no purpose; remove it
before it gains more users.
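
The conversion is purely mechanical; every call site simply stops bouncing through the
accessor and reads the field, e.g. (taken from the hyperv.c hunk below):

	/* Before: index retrieved via the one-line wrapper. */
	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);

	/* After: read the field directly. */
	hv_vcpu->vp_index = vcpu->vcpu_idx;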

Back when kvm_vcpu_get_idx() was added by commit 497d72d80a ("KVM: Add
kvm_vcpu_get_idx to get vcpu index in kvm->vcpus"), the implementation
was more than just a simple wrapper as vcpu->vcpu_idx did not exist and
retrieving the index meant walking over the vCPU array to find the given
vCPU.
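
For reference, the original accessor was roughly the following linear walk over
kvm->vcpus (a sketch of the pre-vcpu_idx implementation, not code in the current tree):

	static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
	{
		struct kvm_vcpu *tmp;
		int idx;

		/* Scan the vCPU array until the matching pointer is found. */
		kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
			if (tmp == vcpu)
				return idx;
		BUG();
	}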

When vcpu_idx was introduced by commit 8750e72a79 ("KVM: remember
position in kvm->vcpus array"), the helper was left behind, likely to
avoid extra thrash (but even then there were only two users, the original
arm usage having been removed at some point in the past).

No functional change intended.

Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210910183220.2397812-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Sean Christopherson, 2021-09-10 11:32:19 -07:00; committed by Paolo Bonzini
Parent: e9337c843c
Commit: 4eeef24241
6 changed files: 8 additions and 14 deletions

arch/s390/kvm/interrupt.c

@@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
-	set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+	set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
-	clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)

arch/s390/kvm/kvm-s390.c

@@ -4066,7 +4066,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 		kvm_s390_patch_guest_per_regs(vcpu);
 	}
 
-	clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
+	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
 
 	vcpu->arch.sie_block->icptcode = 0;
 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);

arch/s390/kvm/kvm-s390.h

@@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
 
 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
 {
-	return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+	return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
 }
 
 static inline int kvm_is_ucontrol(struct kvm *kvm)

arch/x86/kvm/hyperv.c

@@ -939,7 +939,7 @@ static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
 		stimer_init(&hv_vcpu->stimer[i], i);
 
-	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
+	hv_vcpu->vp_index = vcpu->vcpu_idx;
 
 	return 0;
 }
@@ -1444,7 +1444,6 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 	switch (msr) {
 	case HV_X64_MSR_VP_INDEX: {
 		struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
-		int vcpu_idx = kvm_vcpu_get_idx(vcpu);
 		u32 new_vp_index = (u32)data;
 
 		if (!host || new_vp_index >= KVM_MAX_VCPUS)
@@ -1459,9 +1458,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 		 * VP index is changing, adjust num_mismatched_vp_indexes if
 		 * it now matches or no longer matches vcpu_idx.
 		 */
-		if (hv_vcpu->vp_index == vcpu_idx)
+		if (hv_vcpu->vp_index == vcpu->vcpu_idx)
 			atomic_inc(&hv->num_mismatched_vp_indexes);
-		else if (new_vp_index == vcpu_idx)
+		else if (new_vp_index == vcpu->vcpu_idx)
 			atomic_dec(&hv->num_mismatched_vp_indexes);
 
 		hv_vcpu->vp_index = new_vp_index;

arch/x86/kvm/hyperv.h

@@ -83,7 +83,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
-	return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
+	return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
 }
 
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);

include/linux/kvm_host.h

@@ -721,11 +721,6 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
 	return NULL;
 }
 
-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
-{
-	return vcpu->vcpu_idx;
-}
-
 #define kvm_for_each_memslot(memslot, slots)				\
 	for (memslot = &slots->memslots[0];				\
 	     memslot < slots->memslots + slots->used_slots; memslot++) \