net: bridge: use READ_ONCE() and WRITE_ONCE() compiler barriers for fdb->dst

Annotate the writer side of fdb->dst:

- fdb_create()
- br_fdb_update()
- fdb_add_entry()
- br_fdb_external_learn_add()

with WRITE_ONCE() and the reader side:

- br_fdb_test_addr()
- br_fdb_update()
- fdb_fill_info()
- fdb_add_entry()
- fdb_delete_by_addr_and_port()
- br_fdb_external_learn_add()
- br_switchdev_fdb_notify()

with compiler barriers such that the readers do not attempt to reload
fdb->dst multiple times, leading to potentially different destination
ports when the fdb entry is updated concurrently.

This is especially important in read-side sections where fdb->dst is
used more than once, but let's convert all accesses for the sake of
uniformity.

Suggested-by: Nikolay Aleksandrov <nikolay@nvidia.com>
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Vladimir Oltean 2021-06-29 17:06:44 +03:00 committed by David S. Miller
Parent 84fe73996c
Commit 3e19ae7c6f
2 changed files with 21 additions and 14 deletions

View file: net/bridge/br_fdb.c

@@ -440,9 +440,14 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
if (!port) if (!port)
ret = 0; ret = 0;
else { else {
const struct net_bridge_port *dst = NULL;
fdb = br_fdb_find_rcu(port->br, addr, 0); fdb = br_fdb_find_rcu(port->br, addr, 0);
ret = fdb && fdb->dst && fdb->dst->dev != dev && if (fdb)
fdb->dst->state == BR_STATE_FORWARDING; dst = READ_ONCE(fdb->dst);
ret = dst && dst->dev != dev &&
dst->state == BR_STATE_FORWARDING;
} }
rcu_read_unlock(); rcu_read_unlock();
@@ -509,7 +514,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC); fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
if (fdb) { if (fdb) {
memcpy(fdb->key.addr.addr, addr, ETH_ALEN); memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
fdb->dst = source; WRITE_ONCE(fdb->dst, source);
fdb->key.vlan_id = vid; fdb->key.vlan_id = vid;
fdb->flags = flags; fdb->flags = flags;
fdb->updated = fdb->used = jiffies; fdb->updated = fdb->used = jiffies;
@@ -600,10 +605,10 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
} }
/* fastpath: update of existing entry */ /* fastpath: update of existing entry */
if (unlikely(source != fdb->dst && if (unlikely(source != READ_ONCE(fdb->dst) &&
!test_bit(BR_FDB_STICKY, &fdb->flags))) { !test_bit(BR_FDB_STICKY, &fdb->flags))) {
br_switchdev_fdb_notify(fdb, RTM_DELNEIGH); br_switchdev_fdb_notify(fdb, RTM_DELNEIGH);
fdb->dst = source; WRITE_ONCE(fdb->dst, source);
fdb_modified = true; fdb_modified = true;
/* Take over HW learned entry */ /* Take over HW learned entry */
if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN, if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
@@ -650,6 +655,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb, const struct net_bridge_fdb_entry *fdb,
u32 portid, u32 seq, int type, unsigned int flags) u32 portid, u32 seq, int type, unsigned int flags)
{ {
const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
unsigned long now = jiffies; unsigned long now = jiffies;
struct nda_cacheinfo ci; struct nda_cacheinfo ci;
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
@@ -665,7 +671,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
ndm->ndm_pad2 = 0; ndm->ndm_pad2 = 0;
ndm->ndm_flags = 0; ndm->ndm_flags = 0;
ndm->ndm_type = 0; ndm->ndm_type = 0;
ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex; ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
ndm->ndm_state = fdb_to_nud(br, fdb); ndm->ndm_state = fdb_to_nud(br, fdb);
if (test_bit(BR_FDB_OFFLOADED, &fdb->flags)) if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
@@ -964,8 +970,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
if (flags & NLM_F_EXCL) if (flags & NLM_F_EXCL)
return -EEXIST; return -EEXIST;
if (fdb->dst != source) { if (READ_ONCE(fdb->dst) != source) {
fdb->dst = source; WRITE_ONCE(fdb->dst, source);
modified = true; modified = true;
} }
} }
@@ -1132,7 +1138,7 @@ static int fdb_delete_by_addr_and_port(struct net_bridge *br,
struct net_bridge_fdb_entry *fdb; struct net_bridge_fdb_entry *fdb;
fdb = br_fdb_find(br, addr, vlan); fdb = br_fdb_find(br, addr, vlan);
if (!fdb || fdb->dst != p) if (!fdb || READ_ONCE(fdb->dst) != p)
return -ENOENT; return -ENOENT;
fdb_delete(br, fdb, true); fdb_delete(br, fdb, true);
@@ -1281,8 +1287,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
} else { } else {
fdb->updated = jiffies; fdb->updated = jiffies;
if (fdb->dst != p) { if (READ_ONCE(fdb->dst) != p) {
fdb->dst = p; WRITE_ONCE(fdb->dst, p);
modified = true; modified = true;
} }

View file: net/bridge/br_switchdev.c

@@ -110,6 +110,7 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
void void
br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type) br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
{ {
const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
struct switchdev_notifier_fdb_info info = { struct switchdev_notifier_fdb_info info = {
.addr = fdb->key.addr.addr, .addr = fdb->key.addr.addr,
.vid = fdb->key.vlan_id, .vid = fdb->key.vlan_id,
@@ -118,17 +119,17 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
.offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags), .offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags),
}; };
if (!fdb->dst) if (!dst)
return; return;
switch (type) { switch (type) {
case RTM_DELNEIGH: case RTM_DELNEIGH:
call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE, call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
fdb->dst->dev, &info.info, NULL); dst->dev, &info.info, NULL);
break; break;
case RTM_NEWNEIGH: case RTM_NEWNEIGH:
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE, call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
fdb->dst->dev, &info.info, NULL); dst->dev, &info.info, NULL);
break; break;
} }
} }