KVM: s390: Use common waitqueue
Let's use the common waitqueue for KVM vCPUs on s390. By itself this is just a cleanup, but it should also improve the accuracy of diag 0x44, which is implemented via kvm_vcpu_on_spin: kvm_vcpu_on_spin has an explicit check for vCPUs waiting on the waitqueue to optimize the yielding.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
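For context, the waitqueue check the message refers to lives in the directed-yield loop of kvm_vcpu_on_spin() in virt/kvm/kvm_main.c: a vCPU sleeping on the common vcpu->wq is skipped as a yield target. The sketch below is a simplified, abridged rendering of that loop for illustration only (preemption and eligibility bookkeeping of the real code are elided), not the verbatim kernel source:

	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == me)
			continue;	/* never yield to ourselves */
		/*
		 * With s390 now wiring arch.local_int.wq to the common
		 * vcpu->wq, an s390 vCPU blocked in kvm_s390_handle_wait()
		 * becomes visible here and is skipped as a yield target.
		 */
		if (waitqueue_active(&vcpu->wq))
			continue;	/* vCPU is idle; yielding to it helps nobody */
		if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;		/* boosted a busy vCPU, stop scanning */
	}

Before this change the s390 local interrupt code kept its own wait_queue_head_t, so the check above could not see an idle s390 vCPU; after it, diag 0x44 yields land on vCPUs that can actually make progress.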
This commit is contained in:
Parent: b110feaf4d
Commit: d0321a24bf
arch/s390/include/asm/kvm_host.h
@@ -228,7 +228,7 @@ struct kvm_s390_local_interrupt {
 	atomic_t active;
 	struct kvm_s390_float_interrupt *float_int;
 	int timer_due; /* event indicator for waitqueue below */
-	wait_queue_head_t wq;
+	wait_queue_head_t *wq;
 	atomic_t *cpuflags;
 	unsigned int action_bits;
 };
arch/s390/kvm/interrupt.c
@@ -438,7 +438,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 no_timer:
 	spin_lock(&vcpu->arch.local_int.float_int->lock);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
-	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
+	add_wait_queue(&vcpu->wq, &wait);
 	while (list_empty(&vcpu->arch.local_int.list) &&
 		list_empty(&vcpu->arch.local_int.float_int->list) &&
 		(!vcpu->arch.local_int.timer_due) &&
@@ -452,7 +452,7 @@ no_timer:
 	}
 	__unset_cpu_idle(vcpu);
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
+	remove_wait_queue(&vcpu->wq, &wait);
 	spin_unlock_bh(&vcpu->arch.local_int.lock);
 	spin_unlock(&vcpu->arch.local_int.float_int->lock);
 	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
@@ -465,8 +465,8 @@ void kvm_s390_tasklet(unsigned long parm)

 	spin_lock(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.timer_due = 1;
-	if (waitqueue_active(&vcpu->arch.local_int.wq))
-		wake_up_interruptible(&vcpu->arch.local_int.wq);
+	if (waitqueue_active(&vcpu->wq))
+		wake_up_interruptible(&vcpu->wq);
 	spin_unlock(&vcpu->arch.local_int.lock);
 }

@@ -613,7 +613,7 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 	spin_lock_bh(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
-	BUG_ON(waitqueue_active(&li->wq));
+	BUG_ON(waitqueue_active(li->wq));
 	spin_unlock_bh(&li->lock);
 	return 0;
 }
@@ -746,8 +746,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	li = fi->local_int[sigcpu];
 	spin_lock_bh(&li->lock);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&li->wq))
-		wake_up_interruptible(&li->wq);
+	if (waitqueue_active(li->wq))
+		wake_up_interruptible(li->wq);
 	spin_unlock_bh(&li->lock);
 	spin_unlock(&fi->lock);
 	mutex_unlock(&kvm->lock);
@@ -832,8 +832,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 	if (inti->type == KVM_S390_SIGP_STOP)
 		li->action_bits |= ACTION_STOP_ON_STOP;
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&li->wq))
-		wake_up_interruptible(&vcpu->arch.local_int.wq);
+	if (waitqueue_active(&vcpu->wq))
+		wake_up_interruptible(&vcpu->wq);
 	spin_unlock_bh(&li->lock);
 	mutex_unlock(&vcpu->kvm->lock);
 	return 0;
arch/s390/kvm/kvm-s390.c
@@ -438,7 +438,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
 	spin_lock(&kvm->arch.float_int.lock);
 	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
-	init_waitqueue_head(&vcpu->arch.local_int.wq);
+	vcpu->arch.local_int.wq = &vcpu->wq;
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
 	spin_unlock(&kvm->arch.float_int.lock);
arch/s390/kvm/sigp.c
@@ -79,8 +79,8 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&li->wq))
-		wake_up_interruptible(&li->wq);
+	if (waitqueue_active(li->wq))
+		wake_up_interruptible(li->wq);
 	spin_unlock_bh(&li->lock);
 	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
@@ -117,8 +117,8 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&li->wq))
-		wake_up_interruptible(&li->wq);
+	if (waitqueue_active(li->wq))
+		wake_up_interruptible(li->wq);
 	spin_unlock_bh(&li->lock);
 	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
@@ -145,8 +145,8 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
 	atomic_set(&li->active, 1);
 	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
 	li->action_bits |= action;
-	if (waitqueue_active(&li->wq))
-		wake_up_interruptible(&li->wq);
+	if (waitqueue_active(li->wq))
+		wake_up_interruptible(li->wq);
 out:
 	spin_unlock_bh(&li->lock);
@@ -250,8 +250,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,

 	list_add_tail(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
-	if (waitqueue_active(&li->wq))
-		wake_up_interruptible(&li->wq);
+	if (waitqueue_active(li->wq))
+		wake_up_interruptible(li->wq);
 	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

 	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);