Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2018-08-24

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix BPF sockmap and tls to avoid a hang in do_tcp_sendpages() when
   sndbuf is full, caused by missing calls into the underlying socket's
   sk_write_space(), from John.

2) Two BPF sockmap fixes to reject invalid parameters on map creation
   and to fix a map element miscount on allocation failure. Another fix
   for BPF hash tables to use a per-hash-table salt for jhash(), from Daniel.

3) Fix bpftool's command line parsing to terminate on bad arguments
   instead of looping indefinitely in some corner cases, from Quentin.

4) Fix error value of xdp_umem_assign_dev() in order to comply with
   expected bind ops error codes, from Prashant.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by: David S. Miller, 2018-08-23 22:41:55 -07:00
Parents: c08eebad4a 785e76d7a2
Commit: ff0fadfffe
5 changed files, 35 additions and 17 deletions

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c

@@ -15,6 +15,7 @@
 #include <linux/jhash.h>
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
+#include <linux/random.h>
 #include <uapi/linux/btf.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
@@ -41,6 +42,7 @@ struct bpf_htab {
 	atomic_t count;	/* number of elements in this hashtable */
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
+	u32 hashrnd;
 };

 /* each htab element is struct htab_elem + key + value */
@@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab->buckets)
 		goto free_htab;

+	htab->hashrnd = get_random_int();
 	for (i = 0; i < htab->n_buckets; i++) {
 		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 		raw_spin_lock_init(&htab->buckets[i].lock);
@@ -402,9 +405,9 @@ free_htab:
 	return ERR_PTR(err);
 }

-static inline u32 htab_map_hash(const void *key, u32 key_len)
+static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
 {
-	return jhash(key, key_len, 0);
+	return jhash(key, key_len, hashrnd);
 }

 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
@@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 	key_size = map->key_size;

-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);

 	head = select_bucket(htab, hash);
@@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	if (!key)
 		goto find_first_elem;

-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);

 	head = select_bucket(htab, hash);
@@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	key_size = map->key_size;

-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 	key_size = map->key_size;

-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	key_size = map->key_size;

-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	key_size = map->key_size;

-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 	key_size = map->key_size;

-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	key_size = map->key_size;

-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 	b = __select_bucket(htab, hash);
 	head = &b->head;
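Some context for the hashtab.c hunks above: with the constant seed 0, identical keys landed in the same bucket in every hash map, so a key set that degenerates one map's buckets into long chains degenerates them all, predictably. Seeding each table from get_random_int() makes bucket placement per-table. A toy userspace sketch of the effect (the hash function and all names here are invented stand-ins for illustration, not the kernel's jhash()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define N_BUCKETS 1024

/* FNV-1a-style mixer standing in for jhash(key, len, seed) */
static uint32_t toy_hash(const void *key, uint32_t len, uint32_t seed)
{
	const uint8_t *p = key;
	uint32_t h = seed ^ 2166136261u;

	while (len--) {
		h ^= *p++;
		h *= 16777619u;
	}
	return h;
}

int main(void)
{
	const char key[] = "some-map-key";
	uint32_t salt_a, salt_b;

	srand((unsigned int)time(NULL));
	salt_a = (uint32_t)rand();	/* stands in for get_random_int() */
	salt_b = (uint32_t)rand();

	/* Old behavior: every table agrees on the bucket. */
	printf("seed 0, table A: %u\n", toy_hash(key, sizeof(key), 0) % N_BUCKETS);
	printf("seed 0, table B: %u\n", toy_hash(key, sizeof(key), 0) % N_BUCKETS);

	/* New behavior: placement is independent per table
	 * (the two buckets below almost always differ).
	 */
	printf("salted, table A: %u\n", toy_hash(key, sizeof(key), salt_a) % N_BUCKETS);
	printf("salted, table B: %u\n", toy_hash(key, sizeof(key), salt_b) % N_BUCKETS);
	return 0;
}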

diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c

@@ -1427,12 +1427,15 @@ out:
 static void smap_write_space(struct sock *sk)
 {
 	struct smap_psock *psock;
+	void (*write_space)(struct sock *sk);

 	rcu_read_lock();
 	psock = smap_psock_sk(sk);
 	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
 		schedule_work(&psock->tx_work);
+	write_space = psock->save_write_space;
 	rcu_read_unlock();
+	write_space(sk);
 }

 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
@@ -2140,7 +2143,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 		return ERR_PTR(-EPERM);

 	/* check sanity of attributes */
-	if (attr->max_entries == 0 || attr->value_size != 4 ||
+	if (attr->max_entries == 0 ||
+	    attr->key_size == 0 ||
+	    attr->value_size != 4 ||
 	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
 		return ERR_PTR(-EINVAL);
@@ -2267,8 +2272,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
 	}
 	l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
 			     htab->map.numa_node);
-	if (!l_new)
+	if (!l_new) {
+		atomic_dec(&htab->count);
 		return ERR_PTR(-ENOMEM);
+	}
 	memcpy(l_new->key, key, key_size);
 	l_new->sk = sk;
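The smap_write_space() hunk follows the common save-and-chain callback pattern: the socket's original sk_write_space handler is stashed in psock->save_write_space when the socket joins the map, and the override must eventually invoke it or writers blocked on a full sndbuf never wake. A userspace sketch of the pattern (all names invented for illustration; this is not kernel code):

#include <stdio.h>

struct fake_sock {
	void (*write_space)(struct fake_sock *sk);
};

/* stands in for the handler saved in psock->save_write_space */
static void (*saved_write_space)(struct fake_sock *sk);

static void tcp_write_space_stub(struct fake_sock *sk)
{
	printf("waking writers blocked on a full sndbuf\n");
}

static void overriding_write_space(struct fake_sock *sk)
{
	/* map-specific work would happen here (tx_work in the kernel) */

	/* Always chain to the saved handler; skipping this is exactly
	 * the bug: senders sleeping for write space never wake up.
	 */
	saved_write_space(sk);
}

int main(void)
{
	struct fake_sock sk = { .write_space = tcp_write_space_stub };

	saved_write_space = sk.write_space;		/* save original */
	sk.write_space = overriding_write_space;	/* install override */

	sk.write_space(&sk);	/* both layers now run */
	return 0;
}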

diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c

@@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);

-	/* We are already sending pages, ignore notification */
-	if (ctx->in_tcp_sendpages)
+	/* If in_tcp_sendpages, call the lower protocol write space handler
+	 * to ensure we wake up any waiting operations there. For example,
+	 * if do_tcp_sendpages were to call sk_wait_event.
+	 */
+	if (ctx->in_tcp_sendpages) {
+		ctx->sk_write_space(sk);
 		return;
+	}

 	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
 		gfp_t sk_allocation = sk->sk_allocation;
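The tls_write_space() change addresses a lost wakeup: do_tcp_sendpages() can block waiting for write space, and the old handler dropped the notification whenever in_tcp_sendpages was set, leaving the sender asleep forever. A small pthread model of the same shape (illustrative only, names invented; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t space = PTHREAD_COND_INITIALIZER;
static bool have_space;

/* stands in for the underlying socket's sk_write_space() */
static void real_write_space(void)
{
	pthread_mutex_lock(&lock);
	have_space = true;
	pthread_cond_signal(&space);
	pthread_mutex_unlock(&lock);
}

/* stands in for tls_write_space() after the fix */
static void intercepted_write_space(bool in_sendpages)
{
	if (in_sendpages) {
		real_write_space();	/* forward instead of swallowing */
		return;
	}
	/* ... protocol-specific handling would go here ... */
}

/* models a sender sleeping in do_tcp_sendpages() until space appears */
static void *sender(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!have_space)
		pthread_cond_wait(&space, &lock);
	pthread_mutex_unlock(&lock);
	printf("sender resumed\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sender, NULL);
	intercepted_write_space(true);	/* drop the forward and t hangs */
	pthread_join(&t, NULL);
	return 0;
}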

diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c

@@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 		return 0;

 	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
-		return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
+		return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */

 	bpf.command = XDP_QUERY_XSK_UMEM;

 	rtnl_lock();
 	err = xdp_umem_query(dev, queue_id);
 	if (err) {
-		err = err < 0 ? -ENOTSUPP : -EBUSY;
+		err = err < 0 ? -EOPNOTSUPP : -EBUSY;
 		goto err_rtnl_unlock;
 	}
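The errno swap matters because ENOTSUPP (524) is kernel-internal with no userspace definition, while EOPNOTSUPP (95) is a proper UAPI errno that bind() callers can interpret. On glibc the demo below prints "Unknown error 524" for the former and "Operation not supported" for the latter (the 524 constant is duplicated here purely for the demo, since no userspace header provides it):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define KERNEL_ENOTSUPP 524	/* kernel-internal, not in UAPI */

int main(void)
{
	printf("ENOTSUPP:   %s\n", strerror(KERNEL_ENOTSUPP));
	printf("EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP));
	return 0;
}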

diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c

@@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv)
 	}

 	while (argc) {
-		if (argc < 2)
+		if (argc < 2) {
 			BAD_ARG();
+			goto err_close_map;
+		}

 		if (is_prefix(*argv, "cpu")) {
 			char *endptr;
@@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv)
 			NEXT_ARG();
 		} else {
 			BAD_ARG();
+			goto err_close_map;
 		}

 		do_all = false;
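For context on the bpftool hunks: BAD_ARG() reports the problem but does not itself break out of the loop, so without an explicit jump the while (argc) loop re-examined the same bad token forever. A condensed sketch of the corrected shape (names hypothetical, not bpftool's actual code):

#include <stdio.h>

#define BAD_ARG(tok) fprintf(stderr, "bad argument: %s\n", (tok))

static int parse_pairs(int argc, char **argv)
{
	while (argc) {
		if (argc < 2) {
			BAD_ARG(*argv);
			return -1;	/* the termination the fix adds */
		}
		printf("option %s = %s\n", argv[0], argv[1]);
		argc -= 2;	/* consume a "key value" pair */
		argv += 2;
	}
	return 0;
}

int main(int argc, char **argv)
{
	return parse_pairs(argc - 1, argv + 1) ? 1 : 0;
}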