/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

/*
   Compared to the general packet classification problem,
   RSVP needs only several relatively simple rules:

   * (dst, protocol) are always specified,
     so that we are able to hash them.
   * src may be exact, or may be wildcard, so that
     we can keep a hash table plus one wildcard entry.
   * source port (or flow label) is important only if src is given.

   IMPLEMENTATION.

   We use a two level hash table: The top level is keyed by
   destination address and protocol ID, every bucket contains a list
   of "rsvp sessions", identified by destination address, protocol and
   DPI(="Destination Port ID"): triple (key, mask, offset).

   Every bucket has a smaller hash table keyed by source address
   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
   Every bucket is again a list of "RSVP flows", selected by
   source address and SPI(="Source Port ID" here rather than
   "security parameter index"): triple (key, mask, offset).


   NOTE 1. All packets with IPv6 extension headers (except AH and ESP)
   and all fragmented packets go to the best-effort traffic class.

   NOTE 2. Two "port id"s seem to be redundant: rfc2207 requires
   only one "Generalized Port Identifier". So for classic
   ah, esp (and udp, tcp) both *pi should coincide, or one of them
   should be wildcard.

   At first sight, this redundancy is just a waste of CPU
   resources. But DPI and SPI add the possibility to assign different
   priorities to GPIs. Look also at note 4 about tunnels below.

   NOTE 3. One complication is the case of tunneled packets.
   We implement it as follows: if the first lookup
   matches a special session with a non-zero "tunnelhdr" value,
   flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
   In this case, we pull tunnelhdr bytes and restart the lookup
   with the tunnel ID added to the list of keys. Simple and stupid 8)8)
   It's enough for PIMREG and IPIP.

   NOTE 4. Two GPIs make it possible to parse even GRE packets.
   F.e. DPI can select ETH_P_IP (and the necessary flags to make
   tunnelhdr correct) in the GRE protocol field, and SPI matches the
   GRE key. Is it not nice? 8)8)

   Well, as a result, despite its simplicity, we get a pretty
   powerful classification engine.  */
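
/* A GPI triple (key, mask, offset) matches when the 32-bit word at
 * the given offset into the transport header equals key under mask:
 *
 *	!(gpi->mask & (*(u32 *)(xprt + gpi->offset) ^ gpi->key))
 *
 * As an illustrative (not normative) example, a DPI of
 * (key = htonl(dport), mask = htonl(0xFFFF), offset = 0) would match
 * the destination port of a classic UDP or TCP header.
 */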

struct rsvp_head {
	u32			tmap[256/32];
	u32			hgenerator;
	u8			tgenerator;
	struct rsvp_session __rcu *ht[256];
	struct rcu_head		rcu;
};

struct rsvp_session {
	struct rsvp_session __rcu	*next;
	__be32				dst[RSVP_DST_LEN];
	struct tc_rsvp_gpi		dpi;
	u8				protocol;
	u8				tunnelid;
	/* 16 (src,sport) hash slots, and one wildcard source slot */
	struct rsvp_filter __rcu	*ht[16 + 1];
	struct rcu_head			rcu;
};

struct rsvp_filter {
	struct rsvp_filter __rcu	*next;
	__be32				src[RSVP_DST_LEN];
	struct tc_rsvp_gpi		spi;
	u8				tunnelhdr;

	struct tcf_result		res;
	struct tcf_exts			exts;

	u32				handle;
	struct rsvp_session		*sess;
	struct rcu_work			rwork;
};
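
/* A filter handle encodes its position in the two-level table: bits
 * 0-7 are the destination hash bucket (h1), bits 8-15 the source hash
 * slot (h2, where 16 means the wildcard slot), and bits 16-31 come
 * from hgenerator so that handles within one slot stay unique.
 */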

static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{
	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];

	h ^= h>>16;
	h ^= h>>8;
	return (h ^ protocol ^ tunnelid) & 0xFF;
}

static inline unsigned int hash_src(__be32 *src)
{
	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];

	h ^= h>>16;
	h ^= h>>8;
	h ^= h>>4;
	return h & 0xF;
}
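
/* Run the matched filter's actions: a negative result means "skip
 * this filter and keep looking", a positive one is the final verdict,
 * and zero falls through to the tunnel handling after the call site.
 */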
#define RSVP_APPLY_RESULT()				\
{							\
	int r = tcf_exts_exec(skb, &f->exts, res);	\
	if (r < 0)					\
		continue;				\
	else if (r > 0)					\
		return r;				\
}
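
/* Lookup: hash (dst, protocol, tunnelid) to pick a session chain,
 * match the session on (dst, protocol, DPI, tunnelid), then try the
 * source-hash bucket and finally the wildcard bucket. A match with a
 * non-zero tunnelhdr yields a tunnel ID rather than a flow ID; we then
 * step past the tunnel header and restart the whole lookup.
 */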
static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct rsvp_head *head = rcu_dereference_bh(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1, h2;
	__be32 *dst, *src;
	u8 protocol;
	u8 tunnelid = 0;
	u8 *xprt;
#if RSVP_DST_LEN == 4
	struct ipv6hdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ipv6_hdr(skb);
#else
	struct iphdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ip_hdr(skb);
#endif
restart:

#if RSVP_DST_LEN == 4
	src = &nhptr->saddr.s6_addr32[0];
	dst = &nhptr->daddr.s6_addr32[0];
	protocol = nhptr->nexthdr;
	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else
	src = &nhptr->saddr;
	dst = &nhptr->daddr;
	protocol = nhptr->protocol;
	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
	if (ip_is_fragment(nhptr))
		return -1;
#endif

	h1 = hash_dst(dst, protocol, tunnelid);
	h2 = hash_src(src);

	for (s = rcu_dereference_bh(head->ht[h1]); s;
	     s = rcu_dereference_bh(s->next)) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
		    protocol == s->protocol &&
		    !(s->dpi.mask &
		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    tunnelid == s->tunnelid) {

			for (f = rcu_dereference_bh(s->ht[h2]); f;
			     f = rcu_dereference_bh(f->next)) {
				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
				    &&
				    src[0] == f->src[0] &&
				    src[1] == f->src[1] &&
				    src[2] == f->src[2]
#endif
				    ) {
					*res = f->res;
					RSVP_APPLY_RESULT();

matched:
					if (f->tunnelhdr == 0)
						return 0;

					tunnelid = f->res.classid;
					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
					goto restart;
				}
			}

			/* And wildcard bucket... */
			for (f = rcu_dereference_bh(s->ht[16]); f;
			     f = rcu_dereference_bh(f->next)) {
				*res = f->res;
				RSVP_APPLY_RESULT();
				goto matched;
			}
			return -1;
		}
	}
	return -1;
}
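
/* Swap a new filter node in for the old one with the same handle. The
 * handle pins the exact (h1, h2) chain, so the old node must be found
 * there; reaching the BUG_ON() below means we were asked to replace a
 * filter that was never linked in.
 */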
static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter __rcu **ins;
	struct rsvp_filter *pins;
	unsigned int h1 = h & 0xFF;
	unsigned int h2 = (h >> 8) & 0xFF;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
		     ins = &pins->next, pins = rtnl_dereference(*ins)) {
			if (pins->handle == h) {
				RCU_INIT_POINTER(n->next, pins->next);
				rcu_assign_pointer(*ins, n);
				return;
			}
		}
	}

	/* Something went wrong if we are trying to replace a non-existent
	 * node. Might as well halt instead of silently failing.
	 */
	BUG_ON(1);
}

static void *rsvp_get(struct tcf_proto *tp, u32 handle)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1 = handle & 0xFF;
	unsigned int h2 = (handle >> 8) & 0xFF;

	if (h2 > 16)
		return NULL;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (f = rtnl_dereference(s->ht[h2]); f;
		     f = rtnl_dereference(f->next)) {
			if (f->handle == handle)
				return f;
		}
	}
	return NULL;
}

static int rsvp_init(struct tcf_proto *tp)
{
	struct rsvp_head *data;

	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
	if (data) {
		rcu_assign_pointer(tp->root, data);
		return 0;
	}
	return -ENOBUFS;
}

static void __rsvp_delete_filter(struct rsvp_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void rsvp_delete_filter_work(struct work_struct *work)
{
	struct rsvp_filter *f = container_of(to_rcu_work(work),
					     struct rsvp_filter,
					     rwork);
	rtnl_lock();
	__rsvp_delete_filter(f);
	rtnl_unlock();
}

static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
	else
		__rsvp_delete_filter(f);
}

static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int h1, h2;

	if (data == NULL)
		return;

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;

		while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
			RCU_INIT_POINTER(data->ht[h1], s->next);

			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
					rcu_assign_pointer(s->ht[h2], f->next);
					rsvp_delete_filter(tp, f);
				}
			}
			kfree_rcu(s, rcu);
		}
	}
	kfree_rcu(data, rcu);
}
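
/* Unlink and free one filter. If its session is left with no flows,
 * the session is unlinked and freed as well; *last reports whether
 * the table as a whole is now empty.
 */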
static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_filter *nfp, *f = arg;
	struct rsvp_filter __rcu **fp;
	unsigned int h = f->handle;
	struct rsvp_session __rcu **sp;
	struct rsvp_session *nsp, *s = f->sess;
	int i, h1;

	fp = &s->ht[(h >> 8) & 0xFF];
	for (nfp = rtnl_dereference(*fp); nfp;
	     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
		if (nfp == f) {
			RCU_INIT_POINTER(*fp, f->next);
			rsvp_delete_filter(tp, f);

			/* Strip tree */

			for (i = 0; i <= 16; i++)
				if (s->ht[i])
					goto out;

			/* OK, session has no flows */
			sp = &head->ht[h & 0xFF];
			for (nsp = rtnl_dereference(*sp); nsp;
			     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
				if (nsp == s) {
					RCU_INIT_POINTER(*sp, s->next);
					kfree_rcu(s, rcu);
					goto out;
				}
			}

			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 < 256; h1++) {
		if (rcu_access_pointer(head->ht[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}
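
/* Allocate a fresh handle: keep the caller-supplied salt (the low 16
 * bits naming the hash slot) and bump the upper 16 bits until an
 * unused value turns up, giving up after 0xFFFF attempts.
 */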
static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int i = 0xFFFF;

	while (i-- > 0) {
		u32 h;

		if ((data->hgenerator += 0x10000) == 0)
			data->hgenerator = 0x10000;
		h = data->hgenerator|salt;
		if (!rsvp_get(tp, h))
			return h;
	}
	return 0;
}
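
/* Test-and-set the bit for the current tgenerator in the tunnel-ID
 * bitmap; returns 1 if the ID was free and has now been claimed.
 */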
static int tunnel_bts(struct rsvp_head *data)
{
	int n = data->tgenerator >> 5;
	u32 b = 1 << (data->tgenerator & 0x1F);

	if (data->tmap[n] & b)
		return 0;
	data->tmap[n] |= b;
	return 1;
}

static void tunnel_recycle(struct rsvp_head *data)
{
	struct rsvp_session __rcu **sht = data->ht;
	u32 tmap[256/32];
	int h1, h2;

	memset(tmap, 0, sizeof(tmap));

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;
		for (s = rtnl_dereference(sht[h1]); s;
		     s = rtnl_dereference(s->next)) {
			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h2]); f;
				     f = rtnl_dereference(f->next)) {
					if (f->tunnelhdr == 0)
						continue;
					data->tgenerator = f->res.classid;
					tunnel_bts(data);
				}
			}
		}
	}

	memcpy(data->tmap, tmap, sizeof(tmap));
}
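
/* Allocate a tunnel ID (1...255): scan the bitmap once and, if it is
 * exhausted, let tunnel_recycle() rebuild it from the tunnel filters
 * that still exist before scanning a second time.
 */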
static u32 gen_tunnel(struct rsvp_head *data)
{
	int i, k;

	for (k = 0; k < 2; k++) {
		for (i = 255; i > 0; i--) {
			if (++data->tgenerator == 0)
				data->tgenerator = 1;
			if (tunnel_bts(data))
				return data->tgenerator;
		}
		tunnel_recycle(data);
	}
	return 0;
}

static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
	[TCA_RSVP_DST]		= { .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_SRC]		= { .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
};
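
/* Create or update a filter. An existing node only gets its classid
 * and actions adjusted (via a private copy and rsvp_replace()); a new
 * filter is given a handle, optionally a tunnel ID, and is inserted
 * into a matching session (created on demand), ordered so that more
 * specific SPI masks are checked first.
 */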
static int rsvp_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	struct rsvp_filter *f, *nfp;
	struct rsvp_filter __rcu **fp;
	struct rsvp_session *nsp, *s;
	struct rsvp_session __rcu **sp;
	struct tc_rsvp_pinfo *pinfo = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_RSVP_MAX + 1];
	struct tcf_exts e;
	unsigned int h1, h2;
	__be32 *dst;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
					  NULL);
	if (err < 0)
		return err;

	err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, true,
				extack);
	if (err < 0)
		goto errout2;

	f = *arg;
	if (f) {
		/* Node exists: adjust only classid */
		struct rsvp_filter *n;

		if (f->handle != handle && handle)
			goto errout2;

		n = kmemdup(f, sizeof(*f), GFP_KERNEL);
		if (!n) {
			err = -ENOMEM;
			goto errout2;
		}

		err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
				    TCA_RSVP_POLICE);
		if (err < 0) {
			kfree(n);
			goto errout2;
		}

		if (tb[TCA_RSVP_CLASSID]) {
			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
			tcf_bind_filter(tp, &n->res, base);
		}

		tcf_exts_change(&n->exts, &e);
		rsvp_replace(tp, n, handle);
		return 0;
	}

	/* Now more serious part... */
	err = -EINVAL;
	if (handle)
		goto errout2;
	if (tb[TCA_RSVP_DST] == NULL)
		goto errout2;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout2;

	err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	if (err < 0)
		goto errout;
	h2 = 16;
	if (tb[TCA_RSVP_SRC]) {
		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
		h2 = hash_src(f->src);
	}
	if (tb[TCA_RSVP_PINFO]) {
		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
		f->spi = pinfo->spi;
		f->tunnelhdr = pinfo->tunnelhdr;
	}
	if (tb[TCA_RSVP_CLASSID])
		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);

	dst = nla_data(tb[TCA_RSVP_DST]);
	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);

	err = -ENOMEM;
	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
		goto errout;

	if (f->tunnelhdr) {
		err = -EINVAL;
		if (f->res.classid > 255)
			goto errout;

		err = -ENOMEM;
		if (f->res.classid == 0 &&
		    (f->res.classid = gen_tunnel(data)) == 0)
			goto errout;
	}

	for (sp = &data->ht[h1];
	     (s = rtnl_dereference(*sp)) != NULL;
	     sp = &s->next) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
		    pinfo && pinfo->protocol == s->protocol &&
		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    pinfo->tunnelid == s->tunnelid) {

insert:
			/* OK, we found appropriate session */

			fp = &s->ht[h2];

			f->sess = s;
			if (f->tunnelhdr == 0)
				tcf_bind_filter(tp, &f->res, base);

			tcf_exts_change(&f->exts, &e);

			fp = &s->ht[h2];
			for (nfp = rtnl_dereference(*fp); nfp;
			     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
				__u32 mask = nfp->spi.mask & f->spi.mask;

				if (mask != f->spi.mask)
					break;
			}
			RCU_INIT_POINTER(f->next, nfp);
			rcu_assign_pointer(*fp, f);

			*arg = f;
			return 0;
		}
	}

	/* No session found. Create new one. */

	err = -ENOBUFS;
	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
	if (s == NULL)
		goto errout;
	memcpy(s->dst, dst, sizeof(s->dst));

	if (pinfo) {
		s->dpi = pinfo->dpi;
		s->protocol = pinfo->protocol;
		s->tunnelid = pinfo->tunnelid;
	}
	sp = &data->ht[h1];
	for (nsp = rtnl_dereference(*sp); nsp;
	     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
		if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
			break;
	}
	RCU_INIT_POINTER(s->next, nsp);
	rcu_assign_pointer(*sp, s);

	goto insert;

errout:
	tcf_exts_destroy(&f->exts);
	kfree(f);
errout2:
	tcf_exts_destroy(&e);
	return err;
}

static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (arg->stop)
		return;

	for (h = 0; h < 256; h++) {
		struct rsvp_session *s;

		for (s = rtnl_dereference(head->ht[h]); s;
		     s = rtnl_dereference(s->next)) {
			for (h1 = 0; h1 <= 16; h1++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h1]); f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
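
/* Dump one filter. The source address is emitted only for
 * non-wildcard filters, which is detectable from the handle: slot 16
 * in bits 8-15 marks the wildcard source slot.
 */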
static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct rsvp_filter *f = fh;
	struct rsvp_session *s;
	struct nlattr *nest;
	struct tc_rsvp_pinfo pinfo;

	if (f == NULL)
		return skb->len;
	s = f->sess;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
		goto nla_put_failure;
	pinfo.dpi = s->dpi;
	pinfo.spi = f->spi;
	pinfo.protocol = s->protocol;
	pinfo.tunnelid = s->tunnelid;
	pinfo.tunnelhdr = f->tunnelhdr;
	pinfo.pad = 0;
	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
		goto nla_put_failure;
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
		goto nla_put_failure;
	if (((f->handle >> 8) & 0xFF) != 16 &&
	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct rsvp_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static struct tcf_proto_ops RSVP_OPS __read_mostly = {
	.kind		=	RSVP_ID,
	.classify	=	rsvp_classify,
	.init		=	rsvp_init,
	.destroy	=	rsvp_destroy,
	.get		=	rsvp_get,
	.change		=	rsvp_change,
	.delete		=	rsvp_delete,
	.walk		=	rsvp_walk,
	.dump		=	rsvp_dump,
	.bind_class	=	rsvp_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_rsvp(void)
{
	return register_tcf_proto_ops(&RSVP_OPS);
}

static void __exit exit_rsvp(void)
{
	unregister_tcf_proto_ops(&RSVP_OPS);
}

module_init(init_rsvp)
module_exit(exit_rsvp)