net: core: fix uses-after-free in list processing
In netif_receive_skb_list_internal(), all of skb_defer_rx_timestamp(),
do_xdp_generic() and enqueue_to_backlog() can lead to kfree(skb). Thus,
we cannot wait until after they return to remove the skb from the list;
instead, we remove it first and, in the pass case, add it to a sublist
afterwards.
In the case of enqueue_to_backlog() we have already decided not to pass
when we call the function, so we do not need a sublist.
Fixes: 7da517a3bc ("net: core: Another step of skb receive list processing")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: c47078d6a3
Commit: 8c057efaeb
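The pattern the commit message describes can be shown in isolation. The following is a minimal userspace C sketch, not kernel code: the list helpers only imitate the kernel's list_head API, and fake_skb, skb_entry() and consumer_may_free() are hypothetical stand-ins for an sk_buff and for callees such as skb_defer_rx_timestamp() or do_xdp_generic() that may free the buffer they are handed. It demonstrates the same fix: unlink each node before calling the function that might free it, collect the nodes that pass on a sublist, and splice the sublist back onto the main list afterwards.

/*
 * Minimal userspace sketch (NOT kernel code) of the list-handling pattern
 * used by this fix.  The helpers below only imitate the kernel list_head
 * API; fake_skb, skb_entry() and consumer_may_free() are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->prev = n->next = n;
}

/* Move everything on @from to the front of @to and reinitialise @from. */
static void list_splice_init(struct list_head *from, struct list_head *to)
{
        struct list_head *first = from->next, *last = from->prev;

        if (first == from)
                return;
        first->prev = to;
        last->next = to->next;
        to->next->prev = last;
        to->next = first;
        INIT_LIST_HEAD(from);
}

struct fake_skb { int id; struct list_head list; };

#define skb_entry(p) ((struct fake_skb *)((char *)(p) - offsetof(struct fake_skb, list)))

/* Stand-in for skb_defer_rx_timestamp()/do_xdp_generic(): may free its
 * argument.  Returns nonzero when it consumed (freed) the buffer. */
static int consumer_may_free(struct fake_skb *skb)
{
        if (skb->id & 1) {
                free(skb);
                return 1;
        }
        return 0;
}

int main(void)
{
        struct list_head head, sublist, *pos, *next;
        int i;

        INIT_LIST_HEAD(&head);
        INIT_LIST_HEAD(&sublist);
        for (i = 0; i < 6; i++) {
                struct fake_skb *skb = malloc(sizeof(*skb));

                skb->id = i;
                list_add_tail(&skb->list, &head);
        }

        /*
         * The fixed pattern: unlink the node BEFORE calling the function
         * that might free it; survivors are collected on a sublist and
         * spliced back afterwards, as in the patched loops below.
         */
        for (pos = head.next, next = pos->next; pos != &head;
             pos = next, next = next->next) {
                struct fake_skb *skb = skb_entry(pos);

                list_del(&skb->list);
                if (!consumer_may_free(skb))
                        list_add_tail(&skb->list, &sublist);
        }
        list_splice_init(&sublist, &head);

        for (pos = head.next; pos != &head; pos = pos->next)
                printf("still on list: %d\n", skb_entry(pos)->id);
        return 0;
}

As in the diff below, the enqueue_to_backlog() case needs no sublist: by the time it is called the packet has already been removed from the list, and it stays off the list whatever happens afterwards.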
@@ -4982,25 +4982,30 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 {
         struct bpf_prog *xdp_prog = NULL;
         struct sk_buff *skb, *next;
+        struct list_head sublist;
 
+        INIT_LIST_HEAD(&sublist);
         list_for_each_entry_safe(skb, next, head, list) {
                 net_timestamp_check(netdev_tstamp_prequeue, skb);
-                if (skb_defer_rx_timestamp(skb))
-                        /* Handled, remove from list */
-                        list_del(&skb->list);
+                list_del(&skb->list);
+                if (!skb_defer_rx_timestamp(skb))
+                        list_add_tail(&skb->list, &sublist);
         }
+        list_splice_init(&sublist, head);
 
         if (static_branch_unlikely(&generic_xdp_needed_key)) {
                 preempt_disable();
                 rcu_read_lock();
                 list_for_each_entry_safe(skb, next, head, list) {
                         xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-                        if (do_xdp_generic(xdp_prog, skb) != XDP_PASS)
-                                /* Dropped, remove from list */
-                                list_del(&skb->list);
+                        list_del(&skb->list);
+                        if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
+                                list_add_tail(&skb->list, &sublist);
                 }
                 rcu_read_unlock();
                 preempt_enable();
+                /* Put passed packets back on main list */
+                list_splice_init(&sublist, head);
         }
 
         rcu_read_lock();
@@ -5011,9 +5016,9 @@ static void netif_receive_skb_list_internal(struct list_head *head)
                         int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
                         if (cpu >= 0) {
-                                enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
-                                /* Handled, remove from list */
+                                /* Will be handled, remove from list */
                                 list_del(&skb->list);
+                                enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                         }
                 }
         }