netfilter: nf_conntrack_bridge: add support for IPv6

br_defrag() and br_fragment() indirections are added in case that IPv6
support comes as a module, to avoid pulling unnecessary dependencies in.

The new fraglist iterator and fragment transformer APIs are used to
implement the refragmentation code.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Pablo Neira Ayuso 2019-05-29 13:25:38 +02:00, committed by David S. Miller
Parent 3c171f496e
Commit 764dd163ac
3 changed files: 230 additions and 2 deletions

View file

@ -19,6 +19,7 @@ struct ip6_rt_info {
};
struct nf_queue_entry;
struct nf_ct_bridge_frag_data;
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
@ -39,6 +40,15 @@ struct nf_ipv6_ops {
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
#if IS_MODULE(CONFIG_IPV6)
int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
int (*br_fragment)(struct net *net, struct sock *sk,
struct sk_buff *skb,
struct nf_ct_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
const struct nf_ct_bridge_frag_data *data,
struct sk_buff *));
#endif
};
#ifdef CONFIG_NETFILTER
@ -86,6 +96,46 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
#endif
}
/* Defragment an IPv6 skb on behalf of the bridge conntrack code.
 * With IPv6 built as a module, the call is resolved at runtime through
 * the nf_ipv6_ops indirection; 1 is returned when those ops are not
 * registered.  With IPv6 built in, call nf_ct_frag6_gather() directly.
 */
static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
u32 user)
{
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *ops = nf_get_ipv6_ops();
if (ops)
return ops->br_defrag(net, skb, user);
return 1;
#else
return nf_ct_frag6_gather(net, skb, user);
#endif
}
/* Refragment an IPv6 skb for bridged traffic, invoking @output for each
 * resulting fragment.  Implemented and exported by the IPv6 netfilter
 * code (see EXPORT_SYMBOL_GPL(br_ip6_fragment)).
 */
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct nf_ct_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
const struct nf_ct_bridge_frag_data *data,
struct sk_buff *));
/* Fragment an IPv6 skb for the bridge conntrack code, handing each
 * fragment to @output.  When IPv6 is modular the call goes through the
 * nf_ipv6_ops indirection and returns 1 if those ops are unavailable;
 * otherwise br_ip6_fragment() is called directly.
 */
static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
struct sk_buff *skb,
struct nf_ct_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
const struct nf_ct_bridge_frag_data *data,
struct sk_buff *))
{
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *ops = nf_get_ipv6_ops();
if (ops)
return ops->br_fragment(net, sk, skb, data, output);
return 1;
#else
return br_ip6_fragment(net, sk, skb, data, output);
#endif
}
int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)

View file

@ -163,6 +163,31 @@ static unsigned int nf_ct_br_defrag4(struct sk_buff *skb,
return NF_STOLEN;
}
/* Reassemble IPv6 fragments before conntrack processes a bridged packet.
 * Returns an NF_* verdict for the bridge pre-routing hook: NF_STOLEN
 * while fragments are queued, NF_ACCEPT on successful reassembly,
 * NF_DROP on error.
 */
static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
const struct nf_hook_state *state)
{
u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
enum ip_conntrack_info ctinfo;
struct br_input_skb_cb cb;
const struct nf_conn *ct;
int err;
/* Reassemble in the zone of an already-attached conntrack entry,
 * if any, instead of the default zone. */
ct = nf_ct_get(skb, &ctinfo);
if (ct)
zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
/* The defrag path uses the skb control block area (inet6_skb_parm),
 * so save the bridge cb here and restore it after reassembly. */
br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm));
err = nf_ipv6_br_defrag(state->net, skb,
IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
/* queued */
if (err == -EINPROGRESS)
return NF_STOLEN;
br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size);
return err == 0 ? NF_ACCEPT : NF_DROP;
}
static int nf_ct_br_ip_check(const struct sk_buff *skb)
{
const struct iphdr *iph;
@ -177,6 +202,23 @@ static int nf_ct_br_ip_check(const struct sk_buff *skb)
len = ntohs(iph->tot_len);
if (skb->len < nhoff + len ||
len < (iph->ihl * 4))
return -1;
return 0;
}
/* Sanity-check a bridged IPv6 packet before handing it to conntrack:
 * the version field must be 6 and the skb must be at least as long as
 * the IPv6 header plus its payload_len.  Returns 0 if the packet looks
 * valid, -1 otherwise.
 */
static int nf_ct_br_ipv6_check(const struct sk_buff *skb)
{
const struct ipv6hdr *hdr;
int nhoff, len;
nhoff = skb_network_offset(skb);
hdr = ipv6_hdr(skb);
if (hdr->version != 6)
return -1;
/* total on-wire length expected at the network offset */
len = ntohs(hdr->payload_len) + sizeof(struct ipv6hdr) + nhoff;
if (skb->len < len)
return -1;
return 0;
@ -212,7 +254,19 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
ret = nf_ct_br_defrag4(skb, &bridge_state);
break;
case htons(ETH_P_IPV6):
/* fall through */
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return NF_ACCEPT;
len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
if (pskb_trim_rcsum(skb, len))
return NF_ACCEPT;
if (nf_ct_br_ipv6_check(skb))
return NF_ACCEPT;
bridge_state.pf = NFPROTO_IPV6;
ret = nf_ct_br_defrag6(skb, &bridge_state);
break;
default:
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
return NF_ACCEPT;
@ -254,7 +308,8 @@ nf_ct_bridge_refrag(struct sk_buff *skb, const struct nf_hook_state *state,
nf_br_ip_fragment(state->net, state->sk, skb, &data, output);
break;
case htons(ETH_P_IPV6):
return NF_ACCEPT;
nf_br_ip6_fragment(state->net, state->sk, skb, &data, output);
break;
default:
WARN_ON_ONCE(1);
return NF_DROP;

View file

@ -16,6 +16,9 @@
#include <net/ip6_route.h>
#include <net/xfrm.h>
#include <net/netfilter/nf_queue.h>
#include <net/netfilter/nf_conntrack_bridge.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include "../bridge/br_private.h"
int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
{
@ -109,6 +112,122 @@ int __nf_ip6_route(struct net *net, struct dst_entry **dst,
}
EXPORT_SYMBOL_GPL(__nf_ip6_route);
/* Refragment an IPv6 skb that was reassembled by the bridge conntrack
 * code, calling @output for every resulting fragment.
 *
 * @net:    netns the packet belongs to
 * @sk:     socket associated with the skb, may be used by @output
 * @skb:    reassembled packet; consumed on success, freed on drop
 * @data:   bridge-specific state passed through to @output
 * @output: per-fragment transmit callback
 *
 * The fragment size is bounded by the frag_max_size recorded in the
 * bridge control block during defragmentation.  A fast path reuses the
 * existing frag list when its geometry fits; otherwise the slow path
 * rebuilds fragments with the ip6_frag_state iterator.  Returns 0 on
 * success or when the packet is dropped ("blackhole"), or the error
 * from @output on the fast path.
 */
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct nf_ct_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
const struct nf_ct_bridge_frag_data *data,
struct sk_buff *))
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
struct ip6_frag_state state;
u8 *prevhdr, nexthdr = 0;
unsigned int mtu, hlen;
int hroom, err = 0;
__be32 frag_id;
err = ip6_find_1stfragopt(skb, &prevhdr);
if (err < 0)
goto blackhole;
hlen = err;
nexthdr = *prevhdr;
/* Fragments must fit the original fragment size seen on ingress and
 * still be a legal IPv6 packet size. */
mtu = skb->dev->mtu;
if (frag_max_size > mtu ||
frag_max_size < IPV6_MIN_MTU)
goto blackhole;
mtu = frag_max_size;
if (mtu < hlen + sizeof(struct frag_hdr) + 8)
goto blackhole;
mtu -= hlen + sizeof(struct frag_hdr);
frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
&ipv6_hdr(skb)->saddr);
/* Checksum must be finalized before the payload is split up. */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(err = skb_checksum_help(skb)))
goto blackhole;
hroom = LL_RESERVED_SPACE(skb->dev);
if (skb_has_frag_list(skb)) {
unsigned int first_len = skb_pagelen(skb);
struct ip6_fraglist_iter iter;
struct sk_buff *frag2;
/* Fast path: reuse the frag list as-is, provided every piece
 * fits the mtu and has room for the fragment header. */
if (first_len - hlen > mtu ||
skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
goto blackhole;
if (skb_cloned(skb))
goto slow_path;
skb_walk_frags(skb, frag2) {
if (frag2->len > mtu ||
skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
goto blackhole;
/* Partially cloned skb? */
if (skb_shared(frag2))
goto slow_path;
}
err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
&iter);
if (err < 0)
goto blackhole;
for (;;) {
/* Prepare header of the next frame,
* before previous one went down.
*/
if (iter.frag)
ip6_fraglist_prepare(skb, &iter);
err = output(net, sk, data, skb);
if (err || !iter.frag)
break;
skb = ip6_fraglist_next(&iter);
}
kfree(iter.tmp_hdr);
if (!err)
return 0;
/* output() failed: drop the fragments not yet sent. */
kfree_skb_list(iter.frag_list);
return err;
}
slow_path:
/* This is a linearized skbuff, the original geometry is lost for us.
* This may also be a clone skbuff, we could preserve the geometry for
* the copies but probably not worth the effort.
*/
ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,
LL_RESERVED_SPACE(skb->dev), prevhdr, nexthdr, frag_id,
&state);
while (state.left > 0) {
struct sk_buff *skb2;
skb2 = ip6_frag_next(skb, &state);
if (IS_ERR(skb2)) {
err = PTR_ERR(skb2);
goto blackhole;
}
err = output(net, sk, data, skb2);
if (err)
goto blackhole;
}
consume_skb(skb);
return err;
blackhole:
/* Drop silently; 0 tells the caller not to propagate an error. */
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(br_ip6_fragment);
static const struct nf_ipv6_ops ipv6ops = {
#if IS_MODULE(CONFIG_IPV6)
.chk_addr = ipv6_chk_addr,
@ -119,6 +238,10 @@ static const struct nf_ipv6_ops ipv6ops = {
.route_input = ip6_route_input,
.fragment = ip6_fragment,
.reroute = nf_ip6_reroute,
#if IS_MODULE(CONFIG_NF_CONNTRACK_BRIDGE)
.br_defrag = nf_ct_frag6_gather,
.br_fragment = br_ip6_fragment,
#endif
};
int __init ipv6_netfilter_init(void)