2019-05-27 09:55:01 +03:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2014-11-28 16:34:17 +03:00
|
|
|
/*
|
|
|
|
* include/net/switchdev.h - Switch device API
|
2015-09-24 11:02:41 +03:00
|
|
|
* Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
|
2015-03-09 23:59:09 +03:00
|
|
|
* Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
|
2014-11-28 16:34:17 +03:00
|
|
|
*/
|
|
|
|
#ifndef _LINUX_SWITCHDEV_H_
|
|
|
|
#define _LINUX_SWITCHDEV_H_
|
|
|
|
|
|
|
|
#include <linux/netdevice.h>
|
2015-01-16 01:49:36 +03:00
|
|
|
#include <linux/notifier.h>
|
2015-09-24 11:02:41 +03:00
|
|
|
#include <linux/list.h>
|
2015-10-14 20:40:51 +03:00
|
|
|
#include <net/ip_fib.h>
|
2015-01-16 01:49:36 +03:00
|
|
|
|
/* Flags accepted in switchdev_attr.flags / switchdev_obj.flags. */
#define SWITCHDEV_F_NO_RECURSE		BIT(0)	/* operate on the port (lowest) device only; do not recurse lower devs */
#define SWITCHDEV_F_SKIP_EOPNOTSUPP	BIT(1)	/* treat -EOPNOTSUPP from a driver as non-fatal */
#define SWITCHDEV_F_DEFER		BIT(2)	/* defer the operation; see switchdev_deferred_process() */
|
/* Identifies which member of switchdev_attr.u carries the attribute value.
 * Do not reorder: only append, so existing numeric values stay stable.
 */
enum switchdev_attr_id {
	SWITCHDEV_ATTR_ID_UNDEFINED,
	SWITCHDEV_ATTR_ID_PORT_STP_STATE,		/* u.stp_state */
	SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,		/* u.brport_flags */
	SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS,	/* u.brport_flags; check-only pass so drivers can veto up front */
	SWITCHDEV_ATTR_ID_PORT_MROUTER,			/* u.mrouter */
	SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,		/* u.ageing_time */
	SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,	/* u.vlan_filtering */
	SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,		/* u.vlan_protocol */
	SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,		/* u.mc_disabled */
	SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,		/* presumably u.mrouter, bridge-level — verify against callers */
	SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,		/* u.mrp_port_role */
};
|
|
|
|
|
/* Bridge port flags change.  Both fields carry BR_* flag bits: @mask selects
 * which flags are being changed and @val holds their new values, so a
 * listener can see exactly what changed (and reject unsupported
 * combinations in the PRE_BRIDGE_FLAGS pass).
 */
struct switchdev_brport_flags {
	unsigned long val;	/* new flag values; only bits set in @mask apply */
	unsigned long mask;	/* which flags are being changed */
};
|
|
|
|
|
/* A port attribute set/get request passed through the switchdev API.
 * The attribute-specific payload lives in the union, selected by @id.
 */
struct switchdev_attr {
	struct net_device *orig_dev;	/* device the request originated on */
	enum switchdev_attr_id id;	/* selects the union member below */
	u32 flags;			/* SWITCHDEV_F_* */
	void *complete_priv;		/* opaque cookie handed back to @complete */
	void (*complete)(struct net_device *dev, int err, void *priv);	/* completion callback, e.g. for deferred ops */
	union {
		u8 stp_state;					/* PORT_STP_STATE */
		struct switchdev_brport_flags brport_flags;	/* PORT_BRIDGE_FLAGS */
		bool mrouter;					/* PORT_MROUTER */
		clock_t ageing_time;				/* BRIDGE_AGEING_TIME */
		bool vlan_filtering;				/* BRIDGE_VLAN_FILTERING */
		u16 vlan_protocol;				/* BRIDGE_VLAN_PROTOCOL */
		bool mc_disabled;				/* MC_DISABLED */
		u8 mrp_port_role;				/* MRP_PORT_ROLE */
	} u;
};
|
|
|
|
|
/* Identifies the concrete struct switchdev_obj_* a generic
 * struct switchdev_obj is embedded in.  Append only; do not reorder.
 */
enum switchdev_obj_id {
	SWITCHDEV_OBJ_ID_UNDEFINED,
	SWITCHDEV_OBJ_ID_PORT_VLAN,		/* struct switchdev_obj_port_vlan */
	SWITCHDEV_OBJ_ID_PORT_MDB,		/* struct switchdev_obj_port_mdb */
	SWITCHDEV_OBJ_ID_HOST_MDB,		/* MDB entry for the bridge (host) itself */
	SWITCHDEV_OBJ_ID_MRP,			/* struct switchdev_obj_mrp */
	SWITCHDEV_OBJ_ID_RING_TEST_MRP,		/* struct switchdev_obj_ring_test_mrp */
	SWITCHDEV_OBJ_ID_RING_ROLE_MRP,		/* struct switchdev_obj_ring_role_mrp */
	SWITCHDEV_OBJ_ID_RING_STATE_MRP,	/* struct switchdev_obj_ring_state_mrp */
	SWITCHDEV_OBJ_ID_IN_TEST_MRP,		/* struct switchdev_obj_in_test_mrp */
	SWITCHDEV_OBJ_ID_IN_ROLE_MRP,		/* struct switchdev_obj_in_role_mrp */
	SWITCHDEV_OBJ_ID_IN_STATE_MRP,		/* struct switchdev_obj_in_state_mrp */
};
|
|
|
|
|
/* Common header embedded (as member "obj") at the start of every concrete
 * switchdev object; @id says which containing type to downcast to via the
 * SWITCHDEV_OBJ_*() container_of() accessors.
 */
struct switchdev_obj {
	struct net_device *orig_dev;	/* device the request originated on */
	enum switchdev_obj_id id;	/* concrete object type */
	u32 flags;			/* SWITCHDEV_F_* */
	void *complete_priv;		/* opaque cookie handed back to @complete */
	void (*complete)(struct net_device *dev, int err, void *priv);	/* completion callback, e.g. for deferred ops */
};
|
|
|
|
|
/* SWITCHDEV_OBJ_ID_PORT_VLAN */
struct switchdev_obj_port_vlan {
	struct switchdev_obj obj;	/* common header; member must be named "obj" for the accessor below */
	u16 flags;			/* BRIDGE_VLAN_INFO_* (e.g. PVID) */
	u16 vid;			/* single VLAN id; vid ranges are no longer part of this API */
};

/* Downcast a generic object to the VLAN object.  The parameter is spelled
 * "OBJ" (not "obj") so it cannot collide with the member name during macro
 * expansion, and it is parenthesized so comma-containing expressions are safe.
 */
#define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \
	container_of((OBJ), struct switchdev_obj_port_vlan, obj)
|
2015-10-01 12:03:45 +03:00
|
|
|
|
/* SWITCHDEV_OBJ_ID_PORT_MDB */
struct switchdev_obj_port_mdb {
	struct switchdev_obj obj;		/* common header; member must be named "obj" for the accessor below */
	unsigned char addr[ETH_ALEN];		/* multicast group MAC address */
	u16 vid;				/* VLAN the group belongs to */
};

/* Downcast a generic object to the MDB object.  "OBJ" is deliberately not
 * named "obj" (avoids clashing with the member) and is parenthesized.
 */
#define SWITCHDEV_OBJ_PORT_MDB(OBJ) \
	container_of((OBJ), struct switchdev_obj_port_mdb, obj)
|
2016-01-10 23:06:22 +03:00
|
|
|
|
/* SWITCHDEV_OBJ_ID_MRP */
struct switchdev_obj_mrp {
	struct switchdev_obj obj;	/* common header */
	struct net_device *p_port;	/* primary ring port (MRP terminology) */
	struct net_device *s_port;	/* secondary ring port */
	u32 ring_id;			/* identifies the MRP ring instance */
	u16 prio;			/* presumably the ring manager priority — verify against MRP users */
};

/* Downcast a generic object to the MRP object. */
#define SWITCHDEV_OBJ_MRP(OBJ) \
	container_of((OBJ), struct switchdev_obj_mrp, obj)
|
|
|
|
|
|
|
|
/* SWITCHDEV_OBJ_ID_RING_TEST_MRP */
struct switchdev_obj_ring_test_mrp {
	struct switchdev_obj obj;	/* common header */
	/* The value is in us and a value of 0 represents to stop */
	u32 interval;
	u8 max_miss;			/* NOTE(review): presumably test frames allowed to be missed — confirm */
	u32 ring_id;			/* identifies the MRP ring instance */
	u32 period;
	bool monitor;
};

/* Downcast a generic object to the MRP ring-test object. */
#define SWITCHDEV_OBJ_RING_TEST_MRP(OBJ) \
	container_of((OBJ), struct switchdev_obj_ring_test_mrp, obj)
|
|
|
|
|
|
|
|
/* SWITCHDEV_OBJ_ID_RING_ROLE_MRP (comment previously misspelled "SWICHDEV") */
struct switchdev_obj_ring_role_mrp {
	struct switchdev_obj obj;	/* common header */
	u8 ring_role;			/* MRP ring role — encoding defined by the MRP users, verify there */
	u32 ring_id;			/* identifies the MRP ring instance */
	u8 sw_backup;			/* NOTE(review): presumably "fall back to software" when HW can't offload the role — confirm */
};

/* Downcast a generic object to the MRP ring-role object. */
#define SWITCHDEV_OBJ_RING_ROLE_MRP(OBJ) \
	container_of((OBJ), struct switchdev_obj_ring_role_mrp, obj)
|
|
|
|
|
|
|
|
/* SWITCHDEV_OBJ_ID_RING_STATE_MRP */
struct switchdev_obj_ring_state_mrp {
	struct switchdev_obj obj;	/* common header */
	u8 ring_state;			/* MRP ring state — encoding defined by the MRP users, verify there */
	u32 ring_id;			/* identifies the MRP ring instance */
};

/* Downcast a generic object to the MRP ring-state object. */
#define SWITCHDEV_OBJ_RING_STATE_MRP(OBJ) \
	container_of((OBJ), struct switchdev_obj_ring_state_mrp, obj)
|
|
|
|
|
2020-07-14 10:34:47 +03:00
|
|
|
/* SWITCHDEV_OBJ_ID_IN_TEST_MRP ("in" = MRP interconnect) */
struct switchdev_obj_in_test_mrp {
	struct switchdev_obj obj;	/* common header */
	/* The value is in us and a value of 0 represents to stop */
	u32 interval;
	u32 in_id;			/* identifies the MRP interconnect instance */
	u32 period;
	u8 max_miss;			/* NOTE(review): presumably test frames allowed to be missed — confirm */
};

/* Downcast a generic object to the MRP interconnect-test object. */
#define SWITCHDEV_OBJ_IN_TEST_MRP(OBJ) \
	container_of((OBJ), struct switchdev_obj_in_test_mrp, obj)
|
|
|
|
|
|
|
|
/* SWITCHDEV_OBJ_ID_IN_ROLE_MRP (comment previously misspelled "SWICHDEV") */
struct switchdev_obj_in_role_mrp {
	struct switchdev_obj obj;	/* common header */
	struct net_device *i_port;	/* interconnect port */
	u32 ring_id;			/* ring the interconnect belongs to */
	u16 in_id;			/* identifies the MRP interconnect instance */
	u8 in_role;			/* MRP interconnect role — encoding defined by the MRP users, verify there */
	u8 sw_backup;			/* NOTE(review): presumably "fall back to software" when HW can't offload the role — confirm */
};

/* Downcast a generic object to the MRP interconnect-role object. */
#define SWITCHDEV_OBJ_IN_ROLE_MRP(OBJ) \
	container_of((OBJ), struct switchdev_obj_in_role_mrp, obj)
|
|
|
|
|
|
|
|
/* SWITCHDEV_OBJ_ID_IN_STATE_MRP */
struct switchdev_obj_in_state_mrp {
	struct switchdev_obj obj;	/* common header */
	u32 in_id;			/* identifies the MRP interconnect instance */
	u8 in_state;			/* MRP interconnect state — encoding defined by the MRP users, verify there */
};

/* Downcast a generic object to the MRP interconnect-state object. */
#define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \
	container_of((OBJ), struct switchdev_obj_in_state_mrp, obj)
|
|
|
|
|
/* Callback invoked per object when iterating/dumping switchdev objects;
 * a nonzero return is expected to stop the walk (error convention).
 */
typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
|
|
|
|
|
/* Event types sent on the switchdev notifier chains.  The _TO_BRIDGE
 * events flow from drivers towards the bridge, _TO_DEVICE events flow
 * from the bridge towards drivers; _OFFLOADED events report back that an
 * entry was installed in hardware.
 */
enum switchdev_notifier_type {
	SWITCHDEV_FDB_ADD_TO_BRIDGE = 1,
	SWITCHDEV_FDB_DEL_TO_BRIDGE,
	SWITCHDEV_FDB_ADD_TO_DEVICE,
	SWITCHDEV_FDB_DEL_TO_DEVICE,
	SWITCHDEV_FDB_OFFLOADED,
	SWITCHDEV_FDB_FLUSH_TO_BRIDGE,

	SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
	SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
	SWITCHDEV_PORT_ATTR_SET, /* May be blocking. */

	SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE,
	SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE,
	SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
	SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE,
	SWITCHDEV_VXLAN_FDB_OFFLOADED,
};
|
|
|
|
|
/* Common header for all switchdev notifier payloads; embedded as the first
 * member of the type-specific switchdev_notifier_*_info structures so the
 * generic pointer can be downcast.
 */
struct switchdev_notifier_info {
	struct net_device *dev;		/* device the notification is about */
	struct netlink_ext_ack *extack;	/* for reporting errors back to user space; may be NULL */
};
|
|
|
|
|
/* Payload of the SWITCHDEV_FDB_* notifications. */
struct switchdev_notifier_fdb_info {
	struct switchdev_notifier_info info; /* must be first */
	const unsigned char *addr;	/* MAC address of the FDB entry */
	u16 vid;			/* VLAN of the FDB entry */
	u8 added_by_user:1,		/* entry originates from user configuration, not learning */
	   offloaded:1;			/* entry has been installed in hardware */
};
|
|
|
|
|
switchdev: Add SWITCHDEV_PORT_OBJ_ADD, SWITCHDEV_PORT_OBJ_DEL
An offloading driver may need to have access to switchdev events on
ports that aren't directly under its control. An example is a VXLAN port
attached to a bridge offloaded by a driver. The driver needs to know
about VLANs configured on the VXLAN device. However the VXLAN device
isn't stashed between the bridge and a front-panel-port device (such as
is the case e.g. for LAG devices), so the usual switchdev ops don't
reach the driver.
VXLAN is likely not the only device type like this: in theory any L2
tunnel device that needs offloading will prompt requirement of this
sort. This falsifies the assumption that only the lower devices of a
front panel port need to be notified to achieve flawless offloading.
A way to fix this is to give up the notion of port object addition /
deletion as a switchdev operation, which assumes somewhat tight coupling
between the message producer and consumer. And instead send the message
over a notifier chain.
To that end, introduce two new switchdev notifier types,
SWITCHDEV_PORT_OBJ_ADD and SWITCHDEV_PORT_OBJ_DEL. These notifier types
communicate the same event as the corresponding switchdev op, except in
a form of a notification. struct switchdev_notifier_port_obj_info was
added to carry the fields that the switchdev op carries. An additional
field, handled, will be used to communicate back to switchdev that the
event has reached an interested party, which will be important for the
two-phase commit.
The two switchdev operations themselves are kept in place. Following
patches first convert individual clients to the notifier protocol, and
only then are the operations removed.
Signed-off-by: Petr Machata <petrm@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Reviewed-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-11-23 02:28:38 +03:00
|
|
|
/*
 * Payload for SWITCHDEV_PORT_OBJ_ADD / SWITCHDEV_PORT_OBJ_DEL notifications.
 * Carries the same data the former port-object switchdev ops carried, but in
 * notifier form. "handled" is set by a consumer to tell switchdev that the
 * event reached an interested driver (used for the two-phase commit).
 */
struct switchdev_notifier_port_obj_info {
	struct switchdev_notifier_info info; /* must be first */
	const struct switchdev_obj *obj;     /* object being added/deleted */
	bool handled;                        /* set by the driver that consumed the event */
};
|
|
|
|
|
2019-02-27 22:44:25 +03:00
|
|
|
/*
 * Payload for port-attribute notifications; mirrors
 * struct switchdev_notifier_port_obj_info but carries an attribute
 * instead of an object.
 */
struct switchdev_notifier_port_attr_info {
	struct switchdev_notifier_info info; /* must be first */
	const struct switchdev_attr *attr;   /* attribute being set */
	bool handled;                        /* set by the driver that consumed the event */
};
|
|
|
|
|
2015-01-16 01:49:36 +03:00
|
|
|
/* Return the net_device a switchdev notification refers to. */
static inline struct net_device *
switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
{
	return info->dev;
}
|
2014-11-28 16:34:17 +03:00
|
|
|
|
2018-12-12 20:02:54 +03:00
|
|
|
/*
 * Return the netlink extended-ack pointer carried by a switchdev
 * notification, so a handler can attach an error message for user space.
 */
static inline struct netlink_ext_ack *
switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info)
{
	return info->extack;
}
|
|
|
|
|
2014-11-28 16:34:17 +03:00
|
|
|
#ifdef CONFIG_NET_SWITCHDEV

/* Flush attr-set / obj-add / obj-del operations that were deferred to
 * process context.
 */
void switchdev_deferred_process(void);

/* Set a port attribute on @dev (and, per switchdev rules, its lower devs). */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack);

/* Add/delete a port object (e.g. VLAN, FDB entry) on @dev. */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack);
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj);

/* Atomic-context switchdev notifier chain. */
int register_switchdev_notifier(struct notifier_block *nb);
int unregister_switchdev_notifier(struct notifier_block *nb);
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack);

/* Blocking (process-context) switchdev notifier chain. */
int register_switchdev_blocking_notifier(struct notifier_block *nb);
int unregister_switchdev_blocking_notifier(struct notifier_block *nb);
int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack);

/* NOTE(review): declared only in this branch; there is no
 * !CONFIG_NET_SWITCHDEV stub for this function below — confirm all
 * callers are themselves compiled under CONFIG_NET_SWITCHDEV.
 */
void switchdev_port_fwd_mark_set(struct net_device *dev,
				 struct net_device *group_dev,
				 bool joining);

/* Helpers for drivers handling SWITCHDEV_PORT_OBJ_ADD/DEL notifications:
 * walk the notified device and invoke @add_cb/@del_cb on devices for
 * which @check_cb returns true.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack));
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj));

/* Same dispatch pattern for port-attribute notifications. */
int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack));
2014-11-28 16:34:17 +03:00
|
|
|
#else
|
|
|
|
|
2015-10-14 20:40:48 +03:00
|
|
|
/*
 * Stubs for kernels built without CONFIG_NET_SWITCHDEV: operations report
 * -EOPNOTSUPP, notifier (un)registration trivially succeeds with 0, and
 * notifier invocations return NOTIFY_DONE (no one cares).
 */
static inline void switchdev_deferred_process(void)
{
}

static inline int switchdev_port_attr_set(struct net_device *dev,
					  const struct switchdev_attr *attr,
					  struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_port_obj_add(struct net_device *dev,
					 const struct switchdev_obj *obj,
					 struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline int switchdev_port_obj_del(struct net_device *dev,
					 const struct switchdev_obj *obj)
{
	return -EOPNOTSUPP;
}

static inline int register_switchdev_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int call_switchdev_notifiers(unsigned long val,
					   struct net_device *dev,
					   struct switchdev_notifier_info *info,
					   struct netlink_ext_ack *extack)
{
	return NOTIFY_DONE;
}

static inline int
register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int
unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int
call_switchdev_blocking_notifiers(unsigned long val,
				  struct net_device *dev,
				  struct switchdev_notifier_info *info,
				  struct netlink_ext_ack *extack)
{
	return NOTIFY_DONE;
}

/* Dispatch helpers: with switchdev compiled out there is nothing to
 * dispatch, so they succeed without invoking any callback.
 */
static inline int
switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	return 0;
}

static inline int
switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev,
				      const struct switchdev_obj *obj))
{
	return 0;
}

static inline int
switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	return 0;
}
|
2014-11-28 16:34:17 +03:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif /* _LINUX_SWITCHDEV_H_ */
|