/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#ifndef __LINUX_NEXTHOP_H
#define __LINUX_NEXTHOP_H

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/route.h>
#include <linux/types.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/netlink.h>

#define NEXTHOP_VALID_USER_FLAGS RTNH_F_ONLINK

struct nexthop;

struct nh_config {
	u32		nh_id;

	u8		nh_family;
	u8		nh_protocol;
	u8		nh_blackhole;
	u8		nh_fdb;
	u32		nh_flags;

	int		nh_ifindex;
	struct net_device *dev;

	union {
		__be32		ipv4;
		struct in6_addr	ipv6;
	} gw;

	struct nlattr	*nh_grp;
	u16		nh_grp_type;
	u16		nh_grp_res_num_buckets;
	unsigned long	nh_grp_res_idle_timer;
	unsigned long	nh_grp_res_unbalanced_timer;
	bool		nh_grp_res_has_num_buckets;
	bool		nh_grp_res_has_idle_timer;
	bool		nh_grp_res_has_unbalanced_timer;

	struct nlattr	*nh_encap;
	u16		nh_encap_type;

	u32		nlflags;
	struct nl_info	nlinfo;
};

struct nh_info {
	struct hlist_node	dev_hash;    /* entry on netns devhash */
	struct nexthop		*nh_parent;

	u8			family;
	bool			reject_nh;
	bool			fdb_nh;

	union {
		struct fib_nh_common	fib_nhc;
		struct fib_nh		fib_nh;
		struct fib6_nh		fib6_nh;
	};
};

/* Resilient next-hop groups
 *
 * A hash-threshold (mpath) group must recompute its hash ranges whenever a
 * next hop is removed or reweighted, so some flows resolve to a different
 * next hop and e.g. established TCP connections may be reset. A resilient
 * group adds a layer of indirection between the group and its constituent
 * next hops: a hash table. The selection algorithm uses a straightforward
 * modulo operation to choose a hash bucket and forwards traffic to the next
 * hop that the bucket references. Because the bucket-to-next-hop mapping is
 * arbitrary, the buckets of a deleted next hop can simply be reassigned, and
 * weight changes can be satisfied from idle buckets first, ideally keeping
 * "busy" buckets (and the flows that hash to them) forwarding to the same
 * endpoints through the same paths as before the change.
 *
 * Each next hop wants a number of buckets proportional to its weight. When
 * an event might change the bucket allocation, those numbers are updated and
 * an "upkeep" algorithm migrates idle buckets from "overweight" next hops
 * (occupying more buckets than wanted) to "underweight" ones (occupying
 * fewer). If underweight next hops remain, another upkeep run is scheduled
 * for a future time. Configurable knobs determine when a bucket counts as
 * idle, and whether and when buckets are forcefully migrated if idle buckets
 * remain in short supply.
 *
 * Three users access these data structures:
 * - The forwarding code, under RCU; it does not modify them except to update
 *   the time a selected bucket was last used.
 * - Netlink code, running under RTNL, which may modify the data.
 * - The delayed upkeep work, which may modify the data and runs unlocked.
 *   Mutual exclusion with the RTNL code is maintained by canceling the
 *   delayed work synchronously before the RTNL code touches anything, and
 *   restarting it afterwards if necessary.
 *
 * For next-hop removal, the mpath code keeps a spare group structure, makes
 * the necessary changes offline and RCU-swaps the groups. The hash table,
 * however, is about an order of magnitude larger than the group (e.g. 4K
 * entries), so keeping two of them would be overkill: both the primary group
 * and its spare reference the same resilient table, and writers are careful
 * to keep all references valid for the forwarding code. When a next hop is
 * deleted, its buckets are not set to NULL but marked as empty, so the
 * pointers remain usable until upkeep migrates the buckets to another group
 * entry. Except at the very beginning and end of its lifetime, the table is
 * always valid.
 */

struct nh_res_bucket {
	struct nh_grp_entry __rcu *nh_entry;
	atomic_long_t		used_time;
	unsigned long		migrated_time;
	bool			occupied;
	u8			nh_flags;
};

struct nh_res_table {
	struct net		*net;
	u32			nhg_id;
	struct delayed_work	upkeep_dw;

	/* List of NHGEs that have too few buckets ("uw" for underweight).
	 * Reclaimed buckets will be given to entries in this list.
	 */
	struct list_head	uw_nh_entries;
	unsigned long		unbalanced_since;

	u32			idle_timer;
	u32			unbalanced_timer;

	u16			num_nh_buckets;
	struct nh_res_bucket	nh_buckets[];
};
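
/* Illustrative sketch (hypothetical helper, not the in-kernel selector): with
 * the structures above, resilient path selection reduces to a modulo into the
 * bucket table plus an RCU dereference of the bucket's current group entry.
 * The forwarding path also refreshes used_time so that upkeep can tell busy
 * buckets from idle ones:
 *
 *	static struct nexthop *res_select_sketch(struct nh_res_table *res_table,
 *						 int hash)
 *	{
 *		u16 bucket_index = hash % res_table->num_nh_buckets;
 *		struct nh_res_bucket *bucket;
 *		struct nh_grp_entry *nhge;
 *
 *		bucket = &res_table->nh_buckets[bucket_index];
 *		nhge = rcu_dereference(bucket->nh_entry);
 *		atomic_long_set(&bucket->used_time, (long)jiffies);
 *		return nhge->nh;
 *	}
 */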

struct nh_grp_entry {
	struct nexthop	*nh;
	u8		weight;

	union {
		struct {
			atomic_t	upper_bound;
		} hthr;
		struct {
			/* Member on uw_nh_entries. */
			struct list_head	uw_nh_entry;

			u16			count_buckets;
			u16			wants_buckets;
		} res;
	};

	struct list_head nh_list;
	struct nexthop	*nh_parent;  /* nexthop of group with this entry */
};
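
/* Illustrative sketch (hypothetical helper, not the in-kernel selector): in a
 * hash-threshold group, each entry's hthr.upper_bound marks the end of the
 * hash range assigned to that entry, so selection walks the entries until
 * the SKB hash falls at or below an upper bound:
 *
 *	static struct nexthop *hthr_select_sketch(struct nh_group *nhg, int hash)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nhg->num_nh; i++) {
 *			struct nh_grp_entry *nhge = &nhg->nh_entries[i];
 *
 *			if (hash <= atomic_read(&nhge->hthr.upper_bound))
 *				return nhge->nh;
 *		}
 *		return NULL;
 *	}
 */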

struct nh_group {
	struct nh_group		*spare; /* spare group for removals */
	u16			num_nh;
	bool			is_multipath;
	bool			hash_threshold;
	bool			resilient;
	bool			fdb_nh;
	bool			has_v4;

	struct nh_res_table __rcu *res_table;
	struct nh_grp_entry	nh_entries[];
};
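
/* Illustrative sketch (hypothetical helper, not the in-kernel code): removal
 * and replacement edit the spare group offline and RCU-swap it into place, so
 * forwarding-path readers always observe either the old or the new group,
 * never a partially edited one:
 *
 *	static void nh_grp_swap_sketch(struct nexthop *group_nh,
 *				       struct nh_group *newg)
 *	{
 *		struct nh_group *oldg = rtnl_dereference(group_nh->nh_grp);
 *
 *		// newg was built from oldg->spare, minus the removed entry
 *		newg->spare = oldg;
 *		rcu_assign_pointer(group_nh->nh_grp, newg);
 *	}
 */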

struct nexthop {
	struct rb_node		rb_node;    /* entry on netns rbtree */
	struct list_head	fi_list;    /* v4 entries using nh */
	struct list_head	f6i_list;   /* v6 entries using nh */
	struct list_head	fdb_list;   /* fdb entries using this nh */
	struct list_head	grp_list;   /* nh group entries using this nh */
	struct net		*net;

	u32			id;

	u8			protocol;   /* app managing this nh */
	u8			nh_flags;
	bool			is_group;

	refcount_t		refcnt;
	struct rcu_head		rcu;

	union {
		struct nh_info	__rcu *nh_info;
		struct nh_group __rcu *nh_grp;
	};
};

enum nexthop_event_type {
	NEXTHOP_EVENT_DEL,
	NEXTHOP_EVENT_REPLACE,
	NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
	NEXTHOP_EVENT_BUCKET_REPLACE,
};

enum nh_notifier_info_type {
	NH_NOTIFIER_INFO_TYPE_SINGLE,
	NH_NOTIFIER_INFO_TYPE_GRP,
	NH_NOTIFIER_INFO_TYPE_RES_TABLE,
	NH_NOTIFIER_INFO_TYPE_RES_BUCKET,
};

struct nh_notifier_single_info {
	struct net_device *dev;
	u8 gw_family;
	union {
		__be32 ipv4;
		struct in6_addr ipv6;
	};
	u8 is_reject:1,
	   is_fdb:1,
	   has_encap:1;
};

struct nh_notifier_grp_entry_info {
	u8 weight;
	u32 id;
	struct nh_notifier_single_info nh;
};

struct nh_notifier_grp_info {
	u16 num_nh;
	bool is_fdb;
	struct nh_notifier_grp_entry_info nh_entries[];
};

struct nh_notifier_res_bucket_info {
	u16 bucket_index;
	unsigned int idle_timer_ms;
	bool force;
	struct nh_notifier_single_info old_nh;
	struct nh_notifier_single_info new_nh;
};

struct nh_notifier_res_table_info {
	u16 num_nh_buckets;
	struct nh_notifier_single_info nhs[];
};

struct nh_notifier_info {
	struct net *net;
	struct netlink_ext_ack *extack;
	u32 id;
	enum nh_notifier_info_type type;
	union {
		struct nh_notifier_single_info *nh;
		struct nh_notifier_grp_info *nh_grp;
		struct nh_notifier_res_table_info *nh_res_table;
		struct nh_notifier_res_bucket_info *nh_res_bucket;
	};
};

int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
			      struct netlink_ext_ack *extack);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap);
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap);
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity);
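
/* Illustrative sketch (hypothetical driver code): offload drivers register a
 * notifier block and receive enum nexthop_event_type events carrying a
 * struct nh_notifier_info that describes the affected nexthop:
 *
 *	static int my_nexthop_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		switch (event) {
 *		case NEXTHOP_EVENT_REPLACE:
 *			// program info->nh or info->nh_grp into hardware
 *			return NOTIFY_OK;
 *		case NEXTHOP_EVENT_DEL:
 *			// flush the nexthop with info->id from hardware
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block my_nexthop_nb = {
 *		.notifier_call = my_nexthop_event,
 *	};
 *
 * followed by register_nexthop_notifier(net, &my_nexthop_nb, extack) and a
 * matching unregister_nexthop_notifier() on teardown.
 */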

/* caller is holding rcu or rtnl; no reference taken to nexthop */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id);
void nexthop_free_rcu(struct rcu_head *head);

static inline bool nexthop_get(struct nexthop *nh)
{
	return refcount_inc_not_zero(&nh->refcnt);
}

static inline void nexthop_put(struct nexthop *nh)
{
	if (refcount_dec_and_test(&nh->refcnt))
		call_rcu(&nh->rcu, nexthop_free_rcu);
}
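
/* Illustrative usage (hypothetical caller): nexthop_get() only takes a
 * reference while the nexthop is still live, and nexthop_put() frees it via
 * RCU once the last reference drops, so concurrent readers under
 * rcu_read_lock() remain safe:
 *
 *	struct nexthop *nh = nexthop_find_by_id(net, id);
 *
 *	if (nh && nexthop_get(nh)) {
 *		// ... use nh beyond the current RCU/RTNL section ...
 *		nexthop_put(nh);
 *	}
 */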

static inline bool nexthop_cmp(const struct nexthop *nh1,
			       const struct nexthop *nh2)
{
	return nh1 == nh2;
}

static inline bool nexthop_is_fdb(const struct nexthop *nh)
{
	if (nh->is_group) {
		const struct nh_group *nh_grp;

		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
		return nh_grp->fdb_nh;
	} else {
		const struct nh_info *nhi;

		nhi = rcu_dereference_rtnl(nh->nh_info);
		return nhi->fdb_nh;
	}
}

static inline bool nexthop_has_v4(const struct nexthop *nh)
{
	if (nh->is_group) {
		struct nh_group *nh_grp;

		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
		return nh_grp->has_v4;
	}
	return false;
}

static inline bool nexthop_is_multipath(const struct nexthop *nh)
{
	if (nh->is_group) {
		struct nh_group *nh_grp;

		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
		return nh_grp->is_multipath;
	}
	return false;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash);

static inline unsigned int nexthop_num_path(const struct nexthop *nh)
{
	unsigned int rc = 1;

	if (nh->is_group) {
		struct nh_group *nh_grp;

		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
		if (nh_grp->is_multipath)
			rc = nh_grp->num_nh;
	}

	return rc;
}

static inline
struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel)
{
	/* The for_nexthops macros in fib_semantics.c grab a pointer to
	 * the nexthop before checking nhsel.
	 */
	if (nhsel >= nhg->num_nh)
		return NULL;

	return nhg->nh_entries[nhsel].nh;
}

static inline
int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
			    u8 rt_family)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nexthop *nhe = nhg->nh_entries[i].nh;
		struct nh_info *nhi = rcu_dereference_rtnl(nhe->nh_info);
		struct fib_nh_common *nhc = &nhi->fib_nhc;
		int weight = nhg->nh_entries[i].weight;

		if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
			return -EMSGSIZE;
	}

	return 0;
}

/* called with rcu lock */
static inline bool nexthop_is_blackhole(const struct nexthop *nh)
{
	const struct nh_info *nhi;

	if (nh->is_group) {
		struct nh_group *nh_grp;

		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
		if (nh_grp->num_nh > 1)
			return false;

		nh = nh_grp->nh_entries[0].nh;
	}

	nhi = rcu_dereference_rtnl(nh->nh_info);
	return nhi->reject_nh;
}

static inline void nexthop_path_fib_result(struct fib_result *res, int hash)
{
	struct nh_info *nhi;
	struct nexthop *nh;

	nh = nexthop_select_path(res->fi->nh, hash);
	nhi = rcu_dereference(nh->nh_info);
	res->nhc = &nhi->fib_nhc;
}

/* called with rcu read lock or rtnl held */
static inline
struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel)
{
	struct nh_info *nhi;

	BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0);
	BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0);

	if (nh->is_group) {
		struct nh_group *nh_grp;

		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
		if (nh_grp->is_multipath) {
			nh = nexthop_mpath_select(nh_grp, nhsel);
			if (!nh)
				return NULL;
		}
	}

	nhi = rcu_dereference_rtnl(nh->nh_info);
	return &nhi->fib_nhc;
}

/* called from fib_table_lookup with rcu_lock */
static inline
struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh,
					     int fib_flags,
					     const struct flowi4 *flp,
					     int *nhsel)
{
	struct nh_info *nhi;

	if (nh->is_group) {
		struct nh_group *nhg = rcu_dereference(nh->nh_grp);
		int i;

		for (i = 0; i < nhg->num_nh; i++) {
			struct nexthop *nhe = nhg->nh_entries[i].nh;

			nhi = rcu_dereference(nhe->nh_info);
			if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
				*nhsel = i;
				return &nhi->fib_nhc;
			}
		}
	} else {
		nhi = rcu_dereference(nh->nh_info);
		if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
			*nhsel = 0;
			return &nhi->fib_nhc;
		}
	}

	return NULL;
}

static inline bool nexthop_uses_dev(const struct nexthop *nh,
				    const struct net_device *dev)
{
	struct nh_info *nhi;

	if (nh->is_group) {
		struct nh_group *nhg = rcu_dereference(nh->nh_grp);
		int i;

		for (i = 0; i < nhg->num_nh; i++) {
			struct nexthop *nhe = nhg->nh_entries[i].nh;

			nhi = rcu_dereference(nhe->nh_info);
			if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
				return true;
		}
	} else {
		nhi = rcu_dereference(nh->nh_info);
		if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
			return true;
	}

	return false;
}

static inline unsigned int fib_info_num_path(const struct fib_info *fi)
{
	if (unlikely(fi->nh))
		return nexthop_num_path(fi->nh);

	return fi->fib_nhs;
}

int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack);

static inline struct fib_nh_common *fib_info_nhc(struct fib_info *fi, int nhsel)
{
	if (unlikely(fi->nh))
		return nexthop_fib_nhc(fi->nh, nhsel);

	return &fi->fib_nh[nhsel].nh_common;
}

/* only used when fib_nh is built into fib_info */
static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
{
	WARN_ON(fi->nh);

	return &fi->fib_nh[nhsel];
}

/*
 * IPv6 variants
 */
int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack);

/* Caller should either hold rcu_read_lock(), or RTNL. */
static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
{
	struct nh_info *nhi;

	if (nh->is_group) {
		struct nh_group *nh_grp;

		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
		nh = nexthop_mpath_select(nh_grp, 0);
		if (!nh)
			return NULL;
	}

	nhi = rcu_dereference_rtnl(nh->nh_info);
	if (nhi->family == AF_INET6)
		return &nhi->fib6_nh;

	return NULL;
}

/* Variant of nexthop_fib6_nh().
 * Caller should either hold rcu_read_lock_bh(), or RTNL.
 */
static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
{
	struct nh_info *nhi;

	if (nh->is_group) {
		struct nh_group *nh_grp;

		nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp);
		nh = nexthop_mpath_select(nh_grp, 0);
		if (!nh)
			return NULL;
	}

	nhi = rcu_dereference_bh_rtnl(nh->nh_info);
	if (nhi->family == AF_INET6)
		return &nhi->fib6_nh;

	return NULL;
}

static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
{
	struct fib6_nh *fib6_nh;

	fib6_nh = f6i->nh ? nexthop_fib6_nh(f6i->nh) : f6i->fib6_nh;
	return fib6_nh->fib_nh_dev;
}

static inline void nexthop_path_fib6_result(struct fib6_result *res, int hash)
{
	struct nexthop *nh = res->f6i->nh;
	struct nh_info *nhi;

	nh = nexthop_select_path(nh, hash);

	nhi = rcu_dereference_rtnl(nh->nh_info);
	if (nhi->reject_nh) {
		res->fib6_type = RTN_BLACKHOLE;
		res->fib6_flags |= RTF_REJECT;
		res->nh = nexthop_fib6_nh(nh);
	} else {
		res->nh = &nhi->fib6_nh;
	}
}

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg);
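
/* Illustrative usage (hypothetical callback): nexthop_for_each_fib6_nh()
 * invokes the callback on each IPv6 fib6_nh behind the nexthop (one for a
 * single nexthop, one per member for a group); a nonzero return from the
 * callback stops the walk:
 *
 *	static int count_fib6_nh(struct fib6_nh *nh, void *arg)
 *	{
 *		int *count = arg;
 *
 *		(*count)++;
 *		return 0;	// keep iterating
 *	}
 *
 * used as: nexthop_for_each_fib6_nh(nh, count_fib6_nh, &count);
 */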

static inline int nexthop_get_family(struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference_rtnl(nh->nh_info);

	return nhi->family;
}

static inline
struct fib_nh_common *nexthop_fdb_nhc(struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference_rtnl(nh->nh_info);

	return &nhi->fib_nhc;
}

static inline struct fib_nh_common *nexthop_path_fdb_result(struct nexthop *nh,
							     int hash)
{
	struct nh_info *nhi;
	struct nexthop *nhp;

	nhp = nexthop_select_path(nh, hash);
	if (unlikely(!nhp))
		return NULL;
	nhi = rcu_dereference(nhp->nh_info);
	return &nhi->fib_nhc;
}
#endif