xen/netback: fix rx queue stall detection

Commit 1d5d485239 ("xen-netback: require fewer guest Rx slots when
not using GSO") introduced a security problem in netback, as an
interface would only be regarded as stalled if no slot at all is
available in the rx queue ring page. If the SKB at the head of the
queued requests needs more than one rx slot and only one slot is free,
the stall detection logic will never trigger, as the test only checks
whether at least one slot is free.

Fix that by testing for the needed number of slots instead of only one
slot being available.
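
To make the effect concrete, here is a minimal user-space sketch (the
function names stalled_old()/stalled_new() and the numbers are made up
for illustration, they are not the driver code): with one free ring slot
and a head SKB needing three slots, the old test never reports a stall,
while comparing against the actual need does.

#include <stdbool.h>
#include <stdio.h>

/* Old logic: any free slot counts as progress, so a stall is only
 * reported when no slot at all is available. */
static bool stalled_old(unsigned int free_slots)
{
	return free_slots < 1;
}

/* Fixed logic: report a stall whenever the head SKB cannot fit into
 * the slots the guest has posted. */
static bool stalled_new(unsigned int free_slots, unsigned int needed)
{
	return free_slots < needed;
}

int main(void)
{
	unsigned int free_slots = 1;	/* guest posted a single rx request */
	unsigned int needed = 3;	/* e.g. multi-page SKB plus GSO slot */

	printf("old check sees stall: %d\n", stalled_old(free_slots));		/* 0 */
	printf("new check sees stall: %d\n", stalled_new(free_slots, needed));	/* 1 */
	return 0;
}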

In order not to have to take the rx queue lock that often, store the
number of needed slots in the queue data. As all SKB dequeue operations
happen in the rx queue kernel thread, this is safe as long as the
number of needed slots is accessed only via READ_ONCE()/WRITE_ONCE()
and updates are always done with the rx queue lock held.
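
A rough user-space analogue of that access scheme (all names here are
illustrative; READ_ONCE()/WRITE_ONCE() are open-coded in a simplified
form and a pthread mutex stands in for the rx queue lock):

#include <pthread.h>

#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))

struct rx_queue {
	pthread_mutex_t lock;		/* stands in for rx_queue.lock */
	unsigned int slots_needed;	/* slots needed by the head SKB */
};

/* Writer side: only ever called with the queue lock held, i.e. from the
 * enqueue/dequeue paths that change the head of the queue. */
static void update_needed_slots(struct rx_queue *q, unsigned int needed)
{
	WRITE_ONCE(q->slots_needed, needed);
}

/* Enqueue path sketch: the first SKB queued determines the need. */
static void enqueue_first(struct rx_queue *q, unsigned int skb_slots)
{
	pthread_mutex_lock(&q->lock);
	update_needed_slots(q, skb_slots);
	/* ... actually queue the SKB here ... */
	pthread_mutex_unlock(&q->lock);
}

/* Reader side: called without the lock from the stall checks; a plain
 * READ_ONCE() is enough because dequeues only happen in the RX thread. */
static unsigned int needed_slots(struct rx_queue *q)
{
	return READ_ONCE(q->slots_needed);
}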

Add a small helper for obtaining the number of free slots.

This is part of XSA-392

Fixes: 1d5d485239 ("xen-netback: require fewer guest Rx slots when not using GSO")
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Juergen Gross 2021-12-16 08:25:12 +01:00
Parent fe415186b4
Commit 6032046ec4
2 changed files with 42 additions and 24 deletions

@@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned int rx_queue_max;
 	unsigned int rx_queue_len;
 	unsigned long last_rx_time;
+	unsigned int rx_slots_needed;
 	bool stalled;
 
 	struct xenvif_copy_state rx_copy;

@@ -33,28 +33,36 @@
 #include <xen/xen.h>
 #include <xen/events.h>
 
+/*
+ * Update the needed ring page slots for the first SKB queued.
+ * Note that any call sequence outside the RX thread calling this function
+ * needs to wake up the RX thread via a call of xenvif_kick_thread()
+ * afterwards in order to avoid a race with putting the thread to sleep.
+ */
+static void xenvif_update_needed_slots(struct xenvif_queue *queue,
+				       const struct sk_buff *skb)
+{
+	unsigned int needed = 0;
+
+	if (skb) {
+		needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+		if (skb_is_gso(skb))
+			needed++;
+		if (skb->sw_hash)
+			needed++;
+	}
+
+	WRITE_ONCE(queue->rx_slots_needed, needed);
+}
+
 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
-	struct sk_buff *skb;
-	int needed;
-	unsigned long flags;
-
-	spin_lock_irqsave(&queue->rx_queue.lock, flags);
-
-	skb = skb_peek(&queue->rx_queue);
-	if (!skb) {
-		spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+	unsigned int needed;
+
+	needed = READ_ONCE(queue->rx_slots_needed);
+	if (!needed)
 		return false;
-	}
-
-	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
-	if (skb_is_gso(skb))
-		needed++;
-	if (skb->sw_hash)
-		needed++;
-
-	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
 
 	do {
 		prod = queue->rx.sring->req_prod;
@@ -80,6 +88,9 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 
 	spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
+	if (skb_queue_empty(&queue->rx_queue))
+		xenvif_update_needed_slots(queue, skb);
+
 	__skb_queue_tail(&queue->rx_queue, skb);
 
 	queue->rx_queue_len += skb->len;
@@ -100,6 +111,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
 
 	skb = __skb_dequeue(&queue->rx_queue);
 	if (skb) {
+		xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
+
 		queue->rx_queue_len -= skb->len;
 		if (queue->rx_queue_len < queue->rx_queue_max) {
 			struct netdev_queue *txq;
@@ -487,27 +500,31 @@ void xenvif_rx_action(struct xenvif_queue *queue)
 	xenvif_rx_copy_flush(queue);
 }
 
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
 
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
+	return prod - cons;
+}
+
+static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
+{
+	unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+
 	return !queue->stalled &&
-	       prod - cons < 1 &&
+	       xenvif_rx_queue_slots(queue) < needed &&
 	       time_after(jiffies,
 			  queue->last_rx_time + queue->vif->stall_timeout);
 }
 
 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 {
-	RING_IDX prod, cons;
-
-	prod = queue->rx.sring->req_prod;
-	cons = queue->rx.req_cons;
+	unsigned int needed = READ_ONCE(queue->rx_slots_needed);
 
-	return queue->stalled && prod - cons >= 1;
+	return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
 }
 
 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)