Merge branch 'bpf_cgroup_classid'

Daniel Borkmann says:

====================
BPF update

This small helper allows eBPF programs to access the net_cls cgroup's
classid. Please see the individual patches for more details.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2015-07-20 12:41:30 -07:00
Parent d9382bda4e 8d20aabe1c
Commit 03b6dc7d17
4 changed files: 53 additions and 21 deletions
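For orientation, here is a minimal sketch (not part of this commit) of a tc eBPF classifier in restricted C that consumes the new helper. The manual helper binding mirrors the samples/bpf style of this era; the section name and the APP_CLASSID value are assumptions, the latter standing in for whatever was written to the cgroup's net_cls.classid file.

#include <linux/bpf.h>

/* Bind the helper by its UAPI ID; pre-libbpf loaders resolved helper
 * calls this way (cf. samples/bpf/bpf_helpers.h).
 */
static unsigned long long (*bpf_get_cgroup_classid)(void *ctx) =
        (void *) BPF_FUNC_get_cgroup_classid;

#define APP_CLASSID 0x100001 /* assumed net_cls.classid of the target cgroup */

__attribute__((section("classifier"), used))
int match_cgroup(struct __sk_buff *skb)
{
        /* cls_bpf treats a return of 0 as "no match"; -1 selects the
         * default classid configured on the tc filter.
         */
        return bpf_get_cgroup_classid(skb) == APP_CLASSID ? -1 : 0;
}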

include/net/cls_cgroup.h

@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
 	if (classid != sk->sk_classid)
 		sk->sk_classid = classid;
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	u32 classid = task_cls_state(current)->classid;
+
+	/* Due to the nature of the classifier it is required to ignore all
+	 * packets originating from softirq context as accessing `current'
+	 * would lead to false results.
+	 *
+	 * This test assumes that all callers of dev_queue_xmit() explicitly
+	 * disable bh. Knowing this, it is possible to detect softirq based
+	 * calls by looking at the number of nested bh disable calls because
+	 * softirqs always disables bh.
+	 */
+	if (in_serving_softirq()) {
+		/* If there is an sk_classid we'll use that. */
+		if (!skb->sk)
+			return 0;
+
+		classid = skb->sk->sk_classid;
+	}
+
+	return classid;
+}
 #else /* !CONFIG_CGROUP_NET_CLASSID */
 static inline void sock_update_classid(struct sock *sk)
 {
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	return 0;
+}
 #endif /* CONFIG_CGROUP_NET_CLASSID */
 #endif /* _NET_CLS_CGROUP_H */
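As background for the comment above: bh-disable sections and softirq execution bump preempt_count by different offsets, which is what makes the two contexts distinguishable. Paraphrasing <linux/preempt.h> of this era (not part of this diff):

#define softirq_count()      (preempt_count() & SOFTIRQ_MASK)

/* __local_bh_disable_ip() adds SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET),
 * while actually serving a softirq adds SOFTIRQ_OFFSET itself, so this low
 * bit is only set while softirq handlers are running.
 */
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)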

include/uapi/linux/bpf.h

@@ -249,6 +249,13 @@ enum bpf_func_id {
 	 * Return: 0 on success
 	 */
 	BPF_FUNC_get_current_comm,
+
+	/**
+	 * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
+	 * @skb: pointer to skb
+	 * Return: classid if != 0
+	 */
+	BPF_FUNC_get_cgroup_classid,
 	__BPF_FUNC_MAX_ID,
 };
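The enum value is all user space needs: a helper invocation is a BPF_CALL instruction whose immediate carries the function ID, with the context pointer expected in R1. A sketch of a hand-built call (assumed loader-side snippet, not from this patch set):

#include <linux/bpf.h>

/* R1 must already hold the program's context (the skb here); the
 * interpreter/JIT resolves .imm to the function registered behind the
 * matching bpf_func_proto and leaves the result in R0.
 */
static const struct bpf_insn call_classid = {
        .code    = BPF_JMP | BPF_CALL,
        .dst_reg = 0,
        .src_reg = 0,
        .off     = 0,
        .imm     = BPF_FUNC_get_cgroup_classid,
};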

net/core/filter.c

@@ -47,6 +47,7 @@
 #include <linux/if_vlan.h>
 #include <linux/bpf.h>
 #include <net/sch_generic.h>
+#include <net/cls_cgroup.h>
 
 /**
  * sk_filter - run a packet through a socket filter
@@ -1424,6 +1425,18 @@ const struct bpf_func_proto bpf_clone_redirect_proto = {
 	.arg3_type = ARG_ANYTHING,
 };
 
+static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	return task_get_classid((struct sk_buff *) (unsigned long) r1);
+}
+
+static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
+	.func = bpf_get_cgroup_classid,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_CTX,
+};
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -1461,6 +1474,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return &bpf_l4_csum_replace_proto;
 	case BPF_FUNC_clone_redirect:
 		return &bpf_clone_redirect_proto;
+	case BPF_FUNC_get_cgroup_classid:
+		return &bpf_get_cgroup_classid_proto;
 	default:
 		return sk_filter_func_proto(func_id);
 	}

net/sched/cls_cgroup.c

@@ -30,35 +30,16 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 			       struct tcf_result *res)
 {
 	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
-	u32 classid;
-
-	classid = task_cls_state(current)->classid;
-
-	/*
-	 * Due to the nature of the classifier it is required to ignore all
-	 * packets originating from softirq context as accessing `current'
-	 * would lead to false results.
-	 *
-	 * This test assumes that all callers of dev_queue_xmit() explicitely
-	 * disable bh. Knowing this, it is possible to detect softirq based
-	 * calls by looking at the number of nested bh disable calls because
-	 * softirqs always disables bh.
-	 */
-	if (in_serving_softirq()) {
-		/* If there is an sk_classid we'll use that. */
-		if (!skb->sk)
-			return -1;
-		classid = skb->sk->sk_classid;
-	}
+	u32 classid = task_get_classid(skb);
 
 	if (!classid)
 		return -1;
-
 	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
 		return -1;
 
 	res->classid = classid;
 	res->class = 0;
+
 	return tcf_exts_exec(skb, &head->exts, res);
 }
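One subtlety of this refactor: the old code returned -1 directly when a softirq-context skb carried no socket, whereas task_get_classid() returns 0 there and relies on the caller's !classid check to produce the -1. A standalone model (illustrative C, not kernel code) of why the two paths classify identically:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the kernel state both versions consult. */
struct model_skb {
        bool     in_softirq;   /* in_serving_softirq() */
        bool     has_sk;       /* skb->sk != NULL */
        uint32_t sk_classid;   /* skb->sk->sk_classid */
        uint32_t task_classid; /* task_cls_state(current)->classid */
};

static int old_classify(const struct model_skb *s)
{
        uint32_t classid = s->task_classid;

        if (s->in_softirq) {
                if (!s->has_sk)
                        return -1; /* old code bailed out here */
                classid = s->sk_classid;
        }
        return classid ? (int)classid : -1;
}

static int new_classify(const struct model_skb *s)
{
        uint32_t classid;

        /* task_get_classid() returns 0 instead of bailing out... */
        if (s->in_softirq)
                classid = s->has_sk ? s->sk_classid : 0;
        else
                classid = s->task_classid;

        /* ...and the shared !classid check maps 0 back to -1. */
        return classid ? (int)classid : -1;
}

int main(void)
{
        /* Exhaustive check over the boolean dimensions and sample classids. */
        uint32_t ids[] = { 0, 1, 0x100001 };

        for (int so = 0; so < 2; so++)
                for (int sk = 0; sk < 2; sk++)
                        for (int i = 0; i < 3; i++)
                                for (int j = 0; j < 3; j++) {
                                        struct model_skb s = {
                                                so, sk, ids[i], ids[j]
                                        };
                                        if (old_classify(&s) != new_classify(&s))
                                                return 1; /* mismatch would signal a bug */
                                }
        return 0;
}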