Xen-netback: Fix issue caused by using gso_type wrongly

Current netback uses gso_type to check whether the skb contains
gso offload, and this is wrong. Gso_size is the right one to
check gso existence, and gso_type is only used to check gso type.

Some skbs contain a nonzero gso_type together with a zero gso_size; current
netback would treat these skbs as gso and create a wrong response
for them. This also causes ssh failures to domU from other servers.

V2: use skb_is_gso function as Paul Durrant suggested

Signed-off-by: Annie Li <annie.li@oracle.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Annie Li 2014-03-10 22:58:34 +08:00 committed by David S. Miller
Parent 2818fa0fa0
Commit 5bd0767086
1 changed file with 18 additions and 21 deletions

View file

@ -240,7 +240,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct gnttab_copy *copy_gop; struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta; struct xenvif_rx_meta *meta;
unsigned long bytes; unsigned long bytes;
int gso_type; int gso_type = XEN_NETIF_GSO_TYPE_NONE;
/* Data must not cross a page boundary. */ /* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@ -299,12 +299,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
} }
/* Leave a gap for the GSO descriptor. */ /* Leave a gap for the GSO descriptor. */
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) if (skb_is_gso(skb)) {
gso_type = XEN_NETIF_GSO_TYPE_TCPV4; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
gso_type = XEN_NETIF_GSO_TYPE_TCPV6; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
else gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
gso_type = XEN_NETIF_GSO_TYPE_NONE; }
if (*head && ((1 << gso_type) & vif->gso_mask)) if (*head && ((1 << gso_type) & vif->gso_mask))
vif->rx.req_cons++; vif->rx.req_cons++;
@ -338,19 +338,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1; int head = 1;
int old_meta_prod; int old_meta_prod;
int gso_type; int gso_type;
int gso_size;
old_meta_prod = npo->meta_prod; old_meta_prod = npo->meta_prod;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { gso_type = XEN_NETIF_GSO_TYPE_NONE;
gso_type = XEN_NETIF_GSO_TYPE_TCPV4; if (skb_is_gso(skb)) {
gso_size = skb_shinfo(skb)->gso_size; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
gso_type = XEN_NETIF_GSO_TYPE_TCPV6; else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
gso_size = skb_shinfo(skb)->gso_size; gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
} else {
gso_type = XEN_NETIF_GSO_TYPE_NONE;
gso_size = 0;
} }
/* Set up a GSO prefix descriptor, if necessary */ /* Set up a GSO prefix descriptor, if necessary */
@ -358,7 +354,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++; meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type; meta->gso_type = gso_type;
meta->gso_size = gso_size; meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0; meta->size = 0;
meta->id = req->id; meta->id = req->id;
} }
@ -368,7 +364,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
if ((1 << gso_type) & vif->gso_mask) { if ((1 << gso_type) & vif->gso_mask) {
meta->gso_type = gso_type; meta->gso_type = gso_type;
meta->gso_size = gso_size; meta->gso_size = skb_shinfo(skb)->gso_size;
} else { } else {
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0; meta->gso_size = 0;
@ -500,8 +496,9 @@ static void xenvif_rx_action(struct xenvif *vif)
size = skb_frag_size(&skb_shinfo(skb)->frags[i]); size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
} }
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || if (skb_is_gso(skb) &&
skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
max_slots_needed++; max_slots_needed++;
/* If the skb may not fit then bail out now */ /* If the skb may not fit then bail out now */