xen-netback: Fix Rx stall due to race condition
The recent patch to fix receive side flow control (11b57f90257c1d6a91cee720151b69e0c2020cf6: "xen-netback: stop vif thread spinning if frontend is unresponsive") solved the spinning-thread problem but introduced another one. The receive side can stall if:

- [THREAD] xenvif_rx_action sets rx_queue_stopped to true
- [INTERRUPT] an interrupt arrives and sets rx_event to true
- [THREAD] xenvif_kthread then sets rx_event back to false
- [THREAD] rx_work_todo no longer returns true

Also, if an interrupt is sent while there is still no room in the ring, it takes quite a long time until xenvif_rx_action realizes it.

This patch ditches those two variables and reworks rx_work_todo. If the thread finds it cannot fit more skbs into the ring, it saves the last slot estimate in rx_last_skb_slots; otherwise rx_last_skb_slots is kept at 0. rx_work_todo then checks whether:

- there is something to send to the ring (as before)
- there is space for the topmost packet in the queue

This is a more natural and optimal condition to test than two bools that are set somewhere else.

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: bce3ea81d5
Commit: 9ab9831b4c
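Before reading the diff, it may help to see the shape of the reworked stop condition. The sketch below is a rough user-space model, not the kernel code: model_vif, ring_slots_available() and the plain counters are invented stand-ins; only the condition itself, queue not empty and enough ring slots for the saved worst-case estimate, mirrors the rx_work_todo() change in the diff.

/* Minimal user-space model of the reworked rx_work_todo() test.
 * model_vif, ring_slots_available() and the counters below are
 * simplified stand-ins for illustration, not the kernel structures. */
#include <stdbool.h>
#include <stdio.h>

struct model_vif {
	unsigned int rx_queue_len;      /* stand-in for the vif rx_queue occupancy */
	unsigned int ring_free_slots;   /* stand-in for free space in the shared RX ring */
	unsigned int rx_last_skb_slots; /* worst-case slots needed by the queued head skb;
	                                 * 0 when the previous pass did not find the ring full */
};

/* Simplified stand-in for xenvif_rx_ring_slots_available(): with needed == 0
 * it is trivially true, so the test reduces to "queue not empty". */
static bool ring_slots_available(const struct model_vif *vif, unsigned int needed)
{
	return vif->ring_free_slots >= needed;
}

/* The reworked condition: there is RX work only if skbs are queued AND the
 * ring can take the saved worst-case estimate for the head packet. */
static bool rx_work_todo(const struct model_vif *vif)
{
	return vif->rx_queue_len > 0 &&
	       ring_slots_available(vif, vif->rx_last_skb_slots);
}

int main(void)
{
	/* Ring found full on the last pass: the estimate (4 slots) was saved,
	 * so the thread stays asleep until the frontend frees enough slots. */
	struct model_vif stalled = { 3, 2, 4 };
	/* Normal case: estimate is 0, only queue emptiness matters. */
	struct model_vif normal = { 3, 2, 0 };

	printf("stalled ring: %d, normal: %d\n",
	       rx_work_todo(&stalled), rx_work_todo(&normal));
	return 0;
}

Note the degenerate case: when rx_last_skb_slots is 0 (the ring was not found full on the previous pass), the slot check is trivially satisfied and the test reduces to the old "queue not empty" condition.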
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,11 +143,7 @@ struct xenvif {
 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
 	struct xen_netif_rx_back_ring rx;
 	struct sk_buff_head rx_queue;
-	bool rx_queue_stopped;
-	/* Set when the RX interrupt is triggered by the frontend.
-	 * The worker thread may need to wake the queue.
-	 */
-	bool rx_event;
+	RING_IDX rx_last_skb_slots;
 
 	/* This array is allocated seperately as it is large */
 	struct gnttab_copy *grant_copy_op;
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -100,7 +100,6 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif *vif = dev_id;
 
-	vif->rx_event = true;
 	xenvif_kick_thread(vif);
 
 	return IRQ_HANDLED;
@@ -476,7 +476,6 @@ static void xenvif_rx_action(struct xenvif *vif)
 	unsigned long offset;
 	struct skb_cb_overlay *sco;
 	bool need_to_notify = false;
-	bool ring_full = false;
 
 	struct netrx_pending_operations npo = {
 		.copy = vif->grant_copy_op,
@@ -486,7 +485,7 @@ static void xenvif_rx_action(struct xenvif *vif)
 	skb_queue_head_init(&rxq);
 
 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-		int max_slots_needed;
+		RING_IDX max_slots_needed;
 		int i;
 
 		/* We need a cheap worse case estimate for the number of
@@ -509,9 +508,10 @@ static void xenvif_rx_action(struct xenvif *vif)
 		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
 			skb_queue_head(&vif->rx_queue, skb);
 			need_to_notify = true;
-			ring_full = true;
+			vif->rx_last_skb_slots = max_slots_needed;
 			break;
-		}
+		} else
+			vif->rx_last_skb_slots = 0;
 
 		sco = (struct skb_cb_overlay *)skb->cb;
 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
@@ -522,8 +522,6 @@ static void xenvif_rx_action(struct xenvif *vif)
 
 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
-	vif->rx_queue_stopped = !npo.copy_prod && ring_full;
-
 	if (!npo.copy_prod)
 		goto done;
 
@@ -1473,8 +1471,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 
 static inline int rx_work_todo(struct xenvif *vif)
 {
-	return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
-		vif->rx_event;
+	return !skb_queue_empty(&vif->rx_queue) &&
+		xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
@@ -1560,8 +1558,6 @@ int xenvif_kthread(void *data)
 		if (!skb_queue_empty(&vif->rx_queue))
 			xenvif_rx_action(vif);
 
-		vif->rx_event = false;
-
 		if (skb_queue_empty(&vif->rx_queue) &&
 		    netif_queue_stopped(vif->dev))
 			xenvif_start_queue(vif);