sfc: give ef10 its own rwsem in the filter table instead of filter_lock

efx->filter_lock remains in place for use on farch, but EF10 now ignores it.
EFX_EF10_FILTER_FLAG_BUSY is no longer needed, hence it is removed.
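For illustration only (not part of the patch), the EF10 filter paths now follow roughly this pattern; efx_ef10_example_op() is a made-up wrapper, while the fields and locking calls come from the patch below:

static int efx_ef10_example_op(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table;
	int rc;

	/* filter_sem (read) pins efx->filter_state against teardown */
	down_read(&efx->filter_sem);
	table = efx->filter_state;
	/* the table's own rwsem now serialises content changes */
	down_write(&table->lock);

	rc = 0;		/* ... modify table->entry[] / issue MCDI here ... */

	up_write(&table->lock);
	up_read(&efx->filter_sem);
	return rc;
}

Read-only paths such as efx_ef10_filter_get_safe() take table->lock for read instead.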

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Edward Cree 2018-03-27 17:42:28 +01:00 committed by David S. Miller
Parent 3af0f34290
Commit c2bebe37c6
3 changed files with 152 additions and 214 deletions

View file

@@ -96,17 +96,15 @@ struct efx_ef10_filter_table {
MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
unsigned int rx_match_count;
struct rw_semaphore lock; /* Protects entries */
struct {
unsigned long spec; /* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress. AUTO_OLD is
* used to mark and sweep MAC filters for the device address lists.
*/
#define EFX_EF10_FILTER_FLAG_BUSY 1UL
/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
/* unused flag 1UL */
#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
#define EFX_EF10_FILTER_FLAGS 3UL
u64 handle; /* firmware handle */
} *entry;
wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
@@ -4302,26 +4300,33 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace_equal)
{
struct efx_ef10_filter_table *table = efx->filter_state;
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
struct efx_ef10_filter_table *table;
struct efx_filter_spec *saved_spec;
struct efx_rss_context *ctx = NULL;
unsigned int match_pri, hash;
unsigned int priv_flags;
bool replacing = false;
unsigned int depth, i;
int ins_index = -1;
DEFINE_WAIT(wait);
bool is_mc_recip;
s32 rc;
down_read(&efx->filter_sem);
table = efx->filter_state;
down_write(&table->lock);
/* For now, only support RX filters */
if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
EFX_FILTER_FLAG_RX)
return -EINVAL;
EFX_FILTER_FLAG_RX) {
rc = -EINVAL;
goto out_unlock;
}
rc = efx_ef10_filter_pri(table, spec);
if (rc < 0)
return rc;
goto out_unlock;
match_pri = rc;
hash = efx_ef10_filter_hash(spec);
@@ -4335,86 +4340,64 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
&efx->rss_context.list);
else
ctx = &efx->rss_context;
if (!ctx)
return -ENOENT;
if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
return -EOPNOTSUPP;
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
}
if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
rc = -EOPNOTSUPP;
goto out_unlock;
}
}
/* Find any existing filters with the same match tuple or
* else a free slot to insert at. If any of them are busy,
* we have to wait and retry.
* else a free slot to insert at.
*/
for (;;) {
unsigned int depth = 1;
unsigned int i;
for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
saved_spec = efx_ef10_filter_entry_spec(table, i);
spin_lock_bh(&efx->filter_lock);
for (;;) {
i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
saved_spec = efx_ef10_filter_entry_spec(table, i);
if (!saved_spec) {
if (!saved_spec) {
if (ins_index < 0)
ins_index = i;
} else if (efx_ef10_filter_equal(spec, saved_spec)) {
if (spec->priority < saved_spec->priority &&
spec->priority != EFX_FILTER_PRI_AUTO) {
rc = -EPERM;
goto out_unlock;
}
if (!is_mc_recip) {
/* This is the only one */
if (spec->priority ==
saved_spec->priority &&
!replace_equal) {
rc = -EEXIST;
goto out_unlock;
}
ins_index = i;
break;
} else if (spec->priority >
saved_spec->priority ||
(spec->priority ==
saved_spec->priority &&
replace_equal)) {
if (ins_index < 0)
ins_index = i;
} else if (efx_ef10_filter_equal(spec, saved_spec)) {
if (table->entry[i].spec &
EFX_EF10_FILTER_FLAG_BUSY)
break;
if (spec->priority < saved_spec->priority &&
spec->priority != EFX_FILTER_PRI_AUTO) {
rc = -EPERM;
goto out_unlock;
}
if (!is_mc_recip) {
/* This is the only one */
if (spec->priority ==
saved_spec->priority &&
!replace_equal) {
rc = -EEXIST;
goto out_unlock;
}
ins_index = i;
goto found;
} else if (spec->priority >
saved_spec->priority ||
(spec->priority ==
saved_spec->priority &&
replace_equal)) {
if (ins_index < 0)
ins_index = i;
else
__set_bit(depth, mc_rem_map);
}
else
__set_bit(depth, mc_rem_map);
}
/* Once we reach the maximum search depth, use
* the first suitable slot or return -EBUSY if
* there was none
*/
if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
if (ins_index < 0) {
rc = -EBUSY;
goto out_unlock;
}
goto found;
}
++depth;
}
prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_bh(&efx->filter_lock);
schedule();
}
found:
/* Create a software table entry if necessary, and mark it
* busy. We might yet fail to insert, but any attempt to
* insert a conflicting filter while we're waiting for the
* firmware must find the busy entry.
/* Once we reach the maximum search depth, use the first suitable
* slot, or return -EBUSY if there was none
*/
if (ins_index < 0) {
rc = -EBUSY;
goto out_unlock;
}
/* Create a software table entry if necessary. */
saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
if (saved_spec) {
if (spec->priority == EFX_FILTER_PRI_AUTO &&
@@ -4438,28 +4421,13 @@ found:
*saved_spec = *spec;
priv_flags = 0;
}
efx_ef10_filter_set_entry(table, ins_index, saved_spec,
priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
/* Mark lower-priority multicast recipients busy prior to removal */
if (is_mc_recip) {
unsigned int depth, i;
for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
if (test_bit(depth, mc_rem_map))
table->entry[i].spec |=
EFX_EF10_FILTER_FLAG_BUSY;
}
}
spin_unlock_bh(&efx->filter_lock);
efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
/* Actually insert the filter on the HW */
rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
ctx, replacing);
/* Finalise the software table entry */
spin_lock_bh(&efx->filter_lock);
if (rc == 0) {
if (replacing) {
/* Update the fields that may differ */
@@ -4475,6 +4443,12 @@ found:
} else if (!replacing) {
kfree(saved_spec);
saved_spec = NULL;
} else {
/* We failed to replace, so the old filter is still present.
* Roll back the software table to reflect this. In fact the
* efx_ef10_filter_set_entry() call below will do the right
* thing, so nothing extra is needed here.
*/
}
efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
@@ -4496,7 +4470,6 @@ found:
priv_flags = efx_ef10_filter_entry_flags(table, i);
if (rc == 0) {
spin_unlock_bh(&efx->filter_lock);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
@@ -4504,15 +4477,12 @@ found:
rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
inbuf, sizeof(inbuf),
NULL, 0, NULL);
spin_lock_bh(&efx->filter_lock);
}
if (rc == 0) {
kfree(saved_spec);
saved_spec = NULL;
priv_flags = 0;
} else {
priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
}
efx_ef10_filter_set_entry(table, i, saved_spec,
priv_flags);
@@ -4523,10 +4493,9 @@ found:
if (rc == 0)
rc = efx_ef10_make_filter_id(match_pri, ins_index);
wake_up_all(&table->waitq);
out_unlock:
spin_unlock_bh(&efx->filter_lock);
finish_wait(&table->waitq, &wait);
up_write(&table->lock);
up_read(&efx->filter_sem);
return rc;
}
@@ -4539,6 +4508,8 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
* If !by_index, remove by ID
* If by_index, remove by index
* Filter ID may come from userland and must be range-checked.
* Caller must hold efx->filter_sem for read, and efx->filter_state->lock
* for write.
*/
static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
unsigned int priority_mask,
@@ -4553,45 +4524,23 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
DEFINE_WAIT(wait);
int rc;
/* Find the software table entry and mark it busy. Don't
* remove it yet; any attempt to update while we're waiting
* for the firmware must find the busy entry.
*/
for (;;) {
spin_lock_bh(&efx->filter_lock);
if (!(table->entry[filter_idx].spec &
EFX_EF10_FILTER_FLAG_BUSY))
break;
prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_bh(&efx->filter_lock);
schedule();
}
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (!spec ||
(!by_index &&
efx_ef10_filter_pri(table, spec) !=
efx_ef10_filter_get_unsafe_pri(filter_id))) {
rc = -ENOENT;
goto out_unlock;
}
efx_ef10_filter_get_unsafe_pri(filter_id)))
return -ENOENT;
if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
/* Just remove flags */
spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
rc = 0;
goto out_unlock;
return 0;
}
if (!(priority_mask & (1U << spec->priority))) {
rc = -ENOENT;
goto out_unlock;
}
table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
spin_unlock_bh(&efx->filter_lock);
if (!(priority_mask & (1U << spec->priority)))
return -ENOENT;
if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
/* Reset to an automatic filter */
@@ -4609,7 +4558,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
&efx->rss_context,
true);
spin_lock_bh(&efx->filter_lock);
if (rc == 0)
*spec = new_spec;
} else {
@@ -4624,7 +4572,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
inbuf, sizeof(inbuf), NULL, 0, NULL);
spin_lock_bh(&efx->filter_lock);
if ((rc == 0) || (rc == -ENOENT)) {
/* Filter removed OK or didn't actually exist */
kfree(spec);
@@ -4636,11 +4583,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
}
}
table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
wake_up_all(&table->waitq);
out_unlock:
spin_unlock_bh(&efx->filter_lock);
finish_wait(&table->waitq, &wait);
return rc;
}
@@ -4648,17 +4590,33 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
return efx_ef10_filter_remove_internal(efx, 1U << priority,
filter_id, false);
struct efx_ef10_filter_table *table;
int rc;
down_read(&efx->filter_sem);
table = efx->filter_state;
down_write(&table->lock);
rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
false);
up_write(&table->lock);
up_read(&efx->filter_sem);
return rc;
}
/* Caller must hold efx->filter_sem for read */
static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
struct efx_ef10_filter_table *table = efx->filter_state;
if (filter_id == EFX_EF10_FILTER_ID_INVALID)
return;
efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
down_write(&table->lock);
efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
true);
up_write(&table->lock);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -4666,11 +4624,13 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
u32 filter_id, struct efx_filter_spec *spec)
{
unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
struct efx_ef10_filter_table *table = efx->filter_state;
const struct efx_filter_spec *saved_spec;
struct efx_ef10_filter_table *table;
int rc;
spin_lock_bh(&efx->filter_lock);
down_read(&efx->filter_sem);
table = efx->filter_state;
down_read(&table->lock);
saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (saved_spec && saved_spec->priority == priority &&
efx_ef10_filter_pri(table, saved_spec) ==
@@ -4680,13 +4640,15 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
} else {
rc = -ENOENT;
}
spin_unlock_bh(&efx->filter_lock);
up_read(&table->lock);
up_read(&efx->filter_sem);
return rc;
}
static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
enum efx_filter_priority priority)
{
struct efx_ef10_filter_table *table;
unsigned int priority_mask;
unsigned int i;
int rc;
@@ -4694,31 +4656,40 @@ static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
priority_mask = (((1U << (priority + 1)) - 1) &
~(1U << EFX_FILTER_PRI_AUTO));
down_read(&efx->filter_sem);
table = efx->filter_state;
down_write(&table->lock);
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
rc = efx_ef10_filter_remove_internal(efx, priority_mask,
i, true);
if (rc && rc != -ENOENT)
return rc;
break;
rc = 0;
}
return 0;
up_write(&table->lock);
up_read(&efx->filter_sem);
return rc;
}
static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_filter_table *table;
unsigned int filter_idx;
s32 count = 0;
spin_lock_bh(&efx->filter_lock);
down_read(&efx->filter_sem);
table = efx->filter_state;
down_read(&table->lock);
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
if (table->entry[filter_idx].spec &&
efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
priority)
++count;
}
spin_unlock_bh(&efx->filter_lock);
up_read(&table->lock);
up_read(&efx->filter_sem);
return count;
}
@@ -4733,12 +4704,15 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_filter_table *table;
struct efx_filter_spec *spec;
unsigned int filter_idx;
s32 count = 0;
spin_lock_bh(&efx->filter_lock);
down_read(&efx->filter_sem);
table = efx->filter_state;
down_read(&table->lock);
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (spec && spec->priority == priority) {
@@ -4752,73 +4726,44 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
filter_idx);
}
}
spin_unlock_bh(&efx->filter_lock);
up_read(&table->lock);
up_read(&efx->filter_sem);
return count;
}
#ifdef CONFIG_RFS_ACCEL
static void
efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
unsigned long filter_idx,
int rc, efx_dword_t *outbuf,
size_t outlen_actual);
static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int filter_idx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_filter_table *table;
struct efx_filter_spec *spec;
MCDI_DECLARE_BUF(inbuf,
MC_CMD_FILTER_OP_IN_HANDLE_OFST +
MC_CMD_FILTER_OP_IN_HANDLE_LEN);
bool ret = true;
bool ret;
spin_lock_bh(&efx->filter_lock);
down_read(&efx->filter_sem);
table = efx->filter_state;
down_write(&table->lock);
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (!spec ||
(table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
spec->priority != EFX_FILTER_PRI_HINT ||
!rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
if (!spec || spec->priority != EFX_FILTER_PRI_HINT) {
ret = true;
goto out_unlock;
}
if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
flow_id, filter_idx)) {
ret = false;
goto out_unlock;
}
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
MC_CMD_FILTER_OP_IN_OP_REMOVE);
MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
table->entry[filter_idx].handle);
if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
efx_ef10_filter_rfs_expire_complete, filter_idx))
ret = false;
else
table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
filter_idx, true) == 0;
out_unlock:
spin_unlock_bh(&efx->filter_lock);
up_write(&table->lock);
up_read(&efx->filter_sem);
return ret;
}
static void
efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
unsigned long filter_idx,
int rc, efx_dword_t *outbuf,
size_t outlen_actual)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_filter_spec *spec =
efx_ef10_filter_entry_spec(table, filter_idx);
spin_lock_bh(&efx->filter_lock);
if (rc == 0) {
kfree(spec);
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
}
table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
wake_up_all(&table->waitq);
spin_unlock_bh(&efx->filter_lock);
}
#endif /* CONFIG_RFS_ACCEL */
static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
@@ -5011,9 +4956,9 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
table->vlan_filter =
!!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
INIT_LIST_HEAD(&table->vlan_list);
init_rwsem(&table->lock);
efx->filter_state = table;
init_waitqueue_head(&table->waitq);
list_for_each_entry(vlan, &nic_data->vlan_list, list) {
rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
@@ -5055,7 +5000,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
if (!table)
return;
spin_lock_bh(&efx->filter_lock);
down_write(&table->lock);
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_ef10_filter_entry_spec(table, filter_idx);
@@ -5093,15 +5038,11 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
}
}
table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
spin_unlock_bh(&efx->filter_lock);
rc = efx_ef10_filter_push(efx, spec,
&table->entry[filter_idx].handle,
ctx, false);
if (rc)
failed++;
spin_lock_bh(&efx->filter_lock);
if (rc) {
not_restored:
@@ -5113,13 +5054,10 @@ not_restored:
kfree(spec);
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
} else {
table->entry[filter_idx].spec &=
~EFX_EF10_FILTER_FLAG_BUSY;
}
}
spin_unlock_bh(&efx->filter_lock);
up_write(&table->lock);
/* This can happen validly if the MC's capabilities have changed, so
* is not an error.
@@ -5187,6 +5125,8 @@ static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
struct efx_ef10_filter_table *table = efx->filter_state;
unsigned int filter_idx;
efx_rwsem_assert_write_locked(&table->lock);
if (*id != EFX_EF10_FILTER_ID_INVALID) {
filter_idx = efx_ef10_filter_get_unsafe_id(*id);
if (!table->entry[filter_idx].spec)
@@ -5222,10 +5162,10 @@ static void efx_ef10_filter_mark_old(struct efx_nic *efx)
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_filter_vlan *vlan;
spin_lock_bh(&efx->filter_lock);
down_write(&table->lock);
list_for_each_entry(vlan, &table->vlan_list, list)
_efx_ef10_filter_vlan_mark_old(efx, vlan);
spin_unlock_bh(&efx->filter_lock);
up_write(&table->lock);
}
static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
@@ -5502,10 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
return rc;
}
/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
* flag or removes these filters, we don't need to hold the filter_lock while
* scanning for these filters.
*/
/* Remove filters that weren't renewed. */
static void efx_ef10_filter_remove_old(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -5514,6 +5451,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
int rc;
int i;
down_write(&table->lock);
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (READ_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
@@ -5525,6 +5463,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
remove_failed++;
}
}
up_write(&table->lock);
if (remove_failed)
netif_info(efx, drv, efx->net_dev,

View file

@@ -1783,7 +1783,6 @@ static int efx_probe_filters(struct efx_nic *efx)
{
int rc;
spin_lock_init(&efx->filter_lock);
init_rwsem(&efx->filter_sem);
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);

View file

@@ -843,7 +843,7 @@ struct efx_rss_context {
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
* @filter_sem: Filter table rw_semaphore, for freeing the table
* @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
* @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
* @rps_mutex: Protects RPS state of all channels
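Note on the write-lock assertion used in efx_ef10_filter_mark_one_old() above: a minimal sketch of how such a check can be built from the rw_semaphore API is shown below; the in-tree efx_rwsem_assert_write_locked() may be implemented differently.

/* Sketch only: warn if a rw_semaphore is not currently write-locked.
 * If down_read_trylock() succeeds, no writer holds the lock, so warn
 * and immediately drop the read lock we just took.
 */
static inline void example_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
	if (WARN_ON(down_read_trylock(sem)))
		up_read(sem);
}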