KVM: Convert i8254/i8259 locks to raw_spinlocks
The i8254/i8259 locks need to be real spinlocks on preempt-rt. Convert them to raw_spinlock. No change for !RT kernels.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
Parent: e424e19183
Commit: fa8273e954
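For context, every hunk below applies the same mechanical substitution: under PREEMPT_RT a spinlock_t becomes a sleeping lock, so locks taken in the PIT/PIC interrupt-injection paths are switched to raw_spinlock_t, which remains a true spinning lock in every configuration; on !RT kernels the two APIs compile to the same code. A minimal sketch of the conversion pattern (illustrative names only, not taken from the patch; assumes a kernel that already provides the raw_spinlock API, i.e. 2.6.33 or later):

#include <linux/spinlock.h>

/* Hypothetical state carrying a lock that must never sleep. */
struct demo_state {
	raw_spinlock_t lock;            /* was: spinlock_t lock; */
	unsigned long pending;
};

static void demo_init(struct demo_state *s)
{
	raw_spin_lock_init(&s->lock);   /* was: spin_lock_init(&s->lock); */
}

static void demo_kick(struct demo_state *s)
{
	raw_spin_lock(&s->lock);        /* was: spin_lock(&s->lock); */
	s->pending++;
	raw_spin_unlock(&s->lock);      /* was: spin_unlock(&s->lock); */
}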
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -242,11 +242,11 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
 	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
 						 irq_ack_notifier);
-	spin_lock(&ps->inject_lock);
+	raw_spin_lock(&ps->inject_lock);
 	if (atomic_dec_return(&ps->pit_timer.pending) < 0)
 		atomic_inc(&ps->pit_timer.pending);
 	ps->irq_ack = 1;
-	spin_unlock(&ps->inject_lock);
+	raw_spin_unlock(&ps->inject_lock);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -624,7 +624,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 
 	mutex_init(&pit->pit_state.lock);
 	mutex_lock(&pit->pit_state.lock);
-	spin_lock_init(&pit->pit_state.inject_lock);
+	raw_spin_lock_init(&pit->pit_state.inject_lock);
 
 	kvm->arch.vpit = pit;
 	pit->kvm = kvm;
@@ -724,12 +724,12 @@ void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
 		/* Try to inject pending interrupts when
 		 * last one has been acked.
 		 */
-		spin_lock(&ps->inject_lock);
+		raw_spin_lock(&ps->inject_lock);
 		if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
 			ps->irq_ack = 0;
 			inject = 1;
 		}
-		spin_unlock(&ps->inject_lock);
+		raw_spin_unlock(&ps->inject_lock);
 		if (inject)
 			__inject_pit_timer_intr(kvm);
 	}
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -27,7 +27,7 @@ struct kvm_kpit_state {
 	u32    speaker_data_on;
 	struct mutex lock;
 	struct kvm_pit *pit;
-	spinlock_t inject_lock;
+	raw_spinlock_t inject_lock;
 	unsigned long irq_ack;
 	struct kvm_irq_ack_notifier irq_ack_notifier;
 };
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -44,18 +44,19 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
 	 * Other interrupt may be delivered to PIC while lock is dropped but
 	 * it should be safe since PIC state is already updated at this stage.
 	 */
-	spin_unlock(&s->pics_state->lock);
+	raw_spin_unlock(&s->pics_state->lock);
 	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
-	spin_lock(&s->pics_state->lock);
+	raw_spin_lock(&s->pics_state->lock);
 }
 
 void kvm_pic_clear_isr_ack(struct kvm *kvm)
 {
 	struct kvm_pic *s = pic_irqchip(kvm);
-	spin_lock(&s->lock);
+
+	raw_spin_lock(&s->lock);
 	s->pics[0].isr_ack = 0xff;
 	s->pics[1].isr_ack = 0xff;
-	spin_unlock(&s->lock);
+	raw_spin_unlock(&s->lock);
 }
 
 /*
@@ -156,9 +157,9 @@ static void pic_update_irq(struct kvm_pic *s)
 
 void kvm_pic_update_irq(struct kvm_pic *s)
 {
-	spin_lock(&s->lock);
+	raw_spin_lock(&s->lock);
 	pic_update_irq(s);
-	spin_unlock(&s->lock);
+	raw_spin_unlock(&s->lock);
 }
 
 int kvm_pic_set_irq(void *opaque, int irq, int level)
@@ -166,14 +167,14 @@ int kvm_pic_set_irq(void *opaque, int irq, int level)
 	struct kvm_pic *s = opaque;
 	int ret = -1;
 
-	spin_lock(&s->lock);
+	raw_spin_lock(&s->lock);
 	if (irq >= 0 && irq < PIC_NUM_PINS) {
 		ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
 		pic_update_irq(s);
 		trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
 				      s->pics[irq >> 3].imr, ret == 0);
 	}
-	spin_unlock(&s->lock);
+	raw_spin_unlock(&s->lock);
 
 	return ret;
 }
@@ -203,7 +204,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
 	int irq, irq2, intno;
 	struct kvm_pic *s = pic_irqchip(kvm);
 
-	spin_lock(&s->lock);
+	raw_spin_lock(&s->lock);
 	irq = pic_get_irq(&s->pics[0]);
 	if (irq >= 0) {
 		pic_intack(&s->pics[0], irq);
@@ -228,7 +229,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
 		intno = s->pics[0].irq_base + irq;
 	}
 	pic_update_irq(s);
-	spin_unlock(&s->lock);
+	raw_spin_unlock(&s->lock);
 
 	return intno;
 }
@@ -442,7 +443,7 @@ static int picdev_write(struct kvm_io_device *this,
 		printk(KERN_ERR "PIC: non byte write\n");
 		return 0;
 	}
-	spin_lock(&s->lock);
+	raw_spin_lock(&s->lock);
 	switch (addr) {
 	case 0x20:
 	case 0x21:
@@ -455,7 +456,7 @@ static int picdev_write(struct kvm_io_device *this,
 		elcr_ioport_write(&s->pics[addr & 1], addr, data);
 		break;
 	}
-	spin_unlock(&s->lock);
+	raw_spin_unlock(&s->lock);
 	return 0;
 }
 
@@ -472,7 +473,7 @@ static int picdev_read(struct kvm_io_device *this,
 		printk(KERN_ERR "PIC: non byte read\n");
 		return 0;
 	}
-	spin_lock(&s->lock);
+	raw_spin_lock(&s->lock);
 	switch (addr) {
 	case 0x20:
 	case 0x21:
@@ -486,7 +487,7 @@ static int picdev_read(struct kvm_io_device *this,
 		break;
 	}
 	*(unsigned char *)val = data;
-	spin_unlock(&s->lock);
+	raw_spin_unlock(&s->lock);
 	return 0;
 }
 
@@ -520,7 +521,7 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
 	s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
 	if (!s)
 		return NULL;
-	spin_lock_init(&s->lock);
+	raw_spin_lock_init(&s->lock);
 	s->kvm = kvm;
 	s->pics[0].elcr_mask = 0xf8;
 	s->pics[1].elcr_mask = 0xde;
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -62,7 +62,7 @@ struct kvm_kpic_state {
 };
 
 struct kvm_pic {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned pending_acks;
 	struct kvm *kvm;
 	struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2542,18 +2542,18 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_PIC_MASTER:
-		spin_lock(&pic_irqchip(kvm)->lock);
+		raw_spin_lock(&pic_irqchip(kvm)->lock);
 		memcpy(&pic_irqchip(kvm)->pics[0],
 			&chip->chip.pic,
 			sizeof(struct kvm_pic_state));
-		spin_unlock(&pic_irqchip(kvm)->lock);
+		raw_spin_unlock(&pic_irqchip(kvm)->lock);
 		break;
 	case KVM_IRQCHIP_PIC_SLAVE:
-		spin_lock(&pic_irqchip(kvm)->lock);
+		raw_spin_lock(&pic_irqchip(kvm)->lock);
 		memcpy(&pic_irqchip(kvm)->pics[1],
 			&chip->chip.pic,
 			sizeof(struct kvm_pic_state));
-		spin_unlock(&pic_irqchip(kvm)->lock);
+		raw_spin_unlock(&pic_irqchip(kvm)->lock);
 		break;
 	case KVM_IRQCHIP_IOAPIC:
 		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);