Drivers: hv: vmbus: Eliminate the spin lock on the read path
The function hv_ringbuffer_read() is always called on a pre-assigned CPU: each channel is bound to a specific CPU, and this function is always invoked on the CPU to which the channel is bound. There is therefore no need to acquire the spin lock; get rid of this overhead.
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: 85d9aa7051
Commit: 3eba9a77d5
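The rationale above rests on the ring buffer having a single consumer: each VMBus channel is bound to one CPU, and only that CPU ever advances the read index, so the reader needs no lock against other readers. The following is a minimal, self-contained sketch of that single-consumer read pattern; the names ring_buf and ring_read are invented for illustration and do not reflect the actual hv_ring_buffer_info layout or the kernel code in the diff below.

/*
 * Illustrative single-consumer ring read (not the Hyper-V code).
 * Only the CPU that owns the channel calls ring_read(), so read_index
 * needs no lock protection; the producer touches only write_index.
 * The real driver additionally orders the index accesses against the
 * data copies with memory barriers.
 */
#include <stdint.h>

struct ring_buf {
	uint32_t read_index;   /* advanced only by the bound CPU (consumer) */
	uint32_t write_index;  /* advanced only by the producer             */
	uint32_t size;         /* power of two, in bytes                    */
	uint8_t  data[];
};

/* Copy up to len bytes from the ring into buf; return bytes copied. */
static uint32_t ring_read(struct ring_buf *rb, void *buf, uint32_t len)
{
	uint32_t ri    = rb->read_index;
	uint32_t avail = (rb->write_index - ri) & (rb->size - 1);
	uint32_t n     = len < avail ? len : avail;

	for (uint32_t i = 0; i < n; i++)
		((uint8_t *)buf)[i] = rb->data[(ri + i) & (rb->size - 1)];

	rb->read_index = (ri + n) & (rb->size - 1);
	return n;
}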
@@ -388,7 +388,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	u32 bytes_avail_toread;
 	u32 next_read_location = 0;
 	u64 prev_indices = 0;
-	unsigned long flags;
 	struct vmpacket_descriptor desc;
 	u32 offset;
 	u32 packetlen;
@@ -397,7 +396,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	if (buflen <= 0)
 		return -EINVAL;
 
-	spin_lock_irqsave(&inring_info->ring_lock, flags);
 
 	*buffer_actual_len = 0;
 	*requestid = 0;
@@ -412,7 +410,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 		 * No error is set when there is even no header, drivers are
 		 * supposed to analyze buffer_actual_len.
 		 */
-		goto out_unlock;
+		return ret;
 	}
 
 	next_read_location = hv_get_next_read_location(inring_info);
@@ -425,15 +423,11 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	*buffer_actual_len = packetlen;
 	*requestid = desc.trans_id;
 
-	if (bytes_avail_toread < packetlen + offset) {
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
+	if (bytes_avail_toread < packetlen + offset)
+		return -EAGAIN;
 
-	if (packetlen > buflen) {
-		ret = -ENOBUFS;
-		goto out_unlock;
-	}
+	if (packetlen > buflen)
+		return -ENOBUFS;
 
 	next_read_location =
 		hv_get_next_readlocation_withoffset(inring_info, offset);
@@ -460,7 +454,5 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 
 	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
 
-out_unlock:
-	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 	return ret;
 }
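Besides deleting the lock itself, the patch simplifies the error handling: once there is no lock to drop, the "ret = -E...; goto out_unlock;" sequences collapse into direct returns and the out_unlock label disappears. The fragment below is a small, compilable illustration of that pattern; read_packet, peek_header and struct pkt_hdr are made up for the example and are not VMBus interfaces.

/*
 * Illustration of the error-path cleanup performed by the hunks above,
 * using made-up helpers (not VMBus code).  With no lock held, every
 * failure can return its errno directly instead of jumping to a
 * cleanup label.
 */
#include <errno.h>
#include <string.h>

struct pkt_hdr { unsigned int len; unsigned int offset; };

/* Hypothetical helper, defined trivially for the example. */
static int peek_header(const unsigned char *ring, struct pkt_hdr *hdr)
{
	memcpy(hdr, ring, sizeof(*hdr));
	return 0;
}

static int read_packet(const unsigned char *ring, unsigned char *buf,
		       unsigned int buflen, unsigned int bytes_avail)
{
	struct pkt_hdr hdr;
	int ret;

	if (buflen == 0)
		return -EINVAL;

	ret = peek_header(ring, &hdr);
	if (ret)
		return ret;

	if (bytes_avail < hdr.len + hdr.offset)
		return -EAGAIN;		/* was: ret = -EAGAIN; goto out_unlock; */

	if (hdr.len > buflen)
		return -ENOBUFS;	/* was: ret = -ENOBUFS; goto out_unlock; */

	memcpy(buf, ring + sizeof(hdr) + hdr.offset, hdr.len);
	return 0;
}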