Huazhong Tan says:

====================
net: hns3: fixes for -net

There are some bugfixes for the HNS3 ethernet driver. Patch #1 fixes
a desc filling bug, patch #2 fixes a false TX timeout issue, and
patches #3~#5 fix some bugs related to VLAN and FD.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2020-07-28 12:54:48 -07:00
Parents: 181964e619 b7b5d25bdd
Commit: 7a9212d178
3 changed files: 52 additions and 39 deletions

drivers/net/ethernet/hisilicon/hns3/hns3_enet.c

@@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	int k, sizeoflast;
 	dma_addr_t dma;
 
-	if (type == DESC_TYPE_SKB) {
-		struct sk_buff *skb = (struct sk_buff *)priv;
-		int ret;
-
-		ret = hns3_fill_skb_desc(ring, skb, desc);
-		if (unlikely(ret < 0))
-			return ret;
-
-		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-	} else if (type == DESC_TYPE_FRAGLIST_SKB) {
+	if (type == DESC_TYPE_FRAGLIST_SKB ||
+	    type == DESC_TYPE_SKB) {
 		struct sk_buff *skb = (struct sk_buff *)priv;
 
 		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	next_to_use_head = ring->next_to_use;
 
+	ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
+	if (unlikely(ret < 0))
+		goto fill_err;
+
 	ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
 	if (unlikely(ret < 0))
 		goto fill_err;
 
@@ -4140,8 +4136,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
 		return;
 
 	if (linkup) {
-		netif_carrier_on(netdev);
 		netif_tx_wake_all_queues(netdev);
+		netif_carrier_on(netdev);
 		if (netif_msg_link(handle))
 			netdev_info(netdev, "link up\n");
 	} else {
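
To see what patch #1 changes structurally: hns3_fill_skb_desc() used to run
inside hns3_fill_desc(), which is called once per buffer of a packet, and now
runs once per packet from hns3_nic_net_xmit() before the buffers are filled.
Below is a minimal, compilable userspace sketch of that hoisting pattern; every
name in it is a hypothetical stand-in, not the driver's types or functions.

/* Illustration only: hoist once-per-packet work out of the per-buffer
 * path, mirroring how hns3_fill_skb_desc() moved from hns3_fill_desc()
 * into hns3_nic_net_xmit(). All names here are hypothetical.
 */
#include <stdio.h>

struct fake_ring { int header_fills; int buffers_mapped; };

/* once per packet: checksum/VLAN-style header setup */
static int fill_packet_header(struct fake_ring *r)
{
	r->header_fills++;
	return 0;
}

/* once per buffer: mapping-style work only, no header setup */
static int map_one_buffer(struct fake_ring *r)
{
	r->buffers_mapped++;
	return 0;
}

static int xmit_packet(struct fake_ring *r, int nbufs)
{
	int i;

	/* fixed flow: header setup happens exactly once, up front ... */
	if (fill_packet_header(r))
		return -1;

	/* ... and the per-buffer loop no longer re-runs it */
	for (i = 0; i < nbufs; i++)
		if (map_one_buffer(r))
			return -1;
	return 0;
}

int main(void)
{
	struct fake_ring r = {0};

	xmit_packet(&r, 3);
	printf("header_fills=%d buffers_mapped=%d\n",
	       r.header_fills, r.buffers_mapped);	/* prints 1 and 3 */
	return 0;
}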

drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c

@@ -5806,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
 	/* to avoid rule conflict, when user configure rule by ethtool,
 	 * we need to clear all arfs rules
 	 */
+	spin_lock_bh(&hdev->fd_rule_lock);
 	hclge_clear_arfs_rules(handle);
 
-	spin_lock_bh(&hdev->fd_rule_lock);
 	ret = hclge_fd_config_rule(hdev, rule);
 
 	spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5851,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
 		return ret;
 	}
 
+/* make sure being called after lock up with fd_rule_lock */
 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
 				     bool clear_list)
 {
@@ -5863,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
 	if (!hnae3_dev_fd_supported(hdev))
 		return;
 
-	spin_lock_bh(&hdev->fd_rule_lock);
 	for_each_set_bit(location, hdev->fd_bmap,
 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -5880,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
 		bitmap_zero(hdev->fd_bmap,
 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
 	}
-
-	spin_unlock_bh(&hdev->fd_rule_lock);
 }
 
 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6263,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
 				      u16 flow_id, struct flow_keys *fkeys)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_fd_rule_tuples new_tuples;
+	struct hclge_fd_rule_tuples new_tuples = {};
 	struct hclge_dev *hdev = vport->back;
 	struct hclge_fd_rule *rule;
 	u16 tmp_queue_id;
@@ -6273,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
 	if (!hnae3_dev_fd_supported(hdev))
 		return -EOPNOTSUPP;
 
-	memset(&new_tuples, 0, sizeof(new_tuples));
-	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
-
-	spin_lock_bh(&hdev->fd_rule_lock);
-
 	/* when there is already fd rule existed add by user,
 	 * arfs should not work
 	 */
+	spin_lock_bh(&hdev->fd_rule_lock);
 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
 		spin_unlock_bh(&hdev->fd_rule_lock);
 		return -EOPNOTSUPP;
 	}
 
+	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
 	/* check is there flow director filter existed for this flow,
 	 * if not, create a new filter for it;
 	 * if filter exist with different queue id, modify the filter;
@@ -6368,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
 #endif
 }
 
+/* make sure being called after lock up with fd_rule_lock */
 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
 {
 #ifdef CONFIG_RFS_ACCEL
@@ -6420,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
 	hdev->fd_en = enable;
 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
-	if (!enable)
+
+	if (!enable) {
+		spin_lock_bh(&hdev->fd_rule_lock);
 		hclge_del_all_fd_entries(handle, clear);
-	else
+		spin_unlock_bh(&hdev->fd_rule_lock);
+	} else {
 		hclge_restore_fd_entries(handle);
+	}
 }
 
 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
 {
@@ -6886,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 	int i;
 
 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
+	spin_lock_bh(&hdev->fd_rule_lock);
 	hclge_clear_arfs_rules(handle);
+	spin_unlock_bh(&hdev->fd_rule_lock);
 
 	/* If it is not PF reset, the firmware will disable the MAC,
 	 * so it only need to stop phy here.
@@ -9040,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
 	bool writen_to_tbl = false;
 	int ret = 0;
 
-	/* When device is resetting, firmware is unable to handle
-	 * mailbox. Just record the vlan id, and remove it after
+	/* When device is resetting or reset failed, firmware is unable to
+	 * handle mailbox. Just record the vlan id, and remove it after
 	 * reset finished.
 	 */
-	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
 		return -EBUSY;
 	}
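
The hclge_main.c hunks above share one locking convention: hdev->fd_rule_lock
moves out to the callers, so hclge_clear_arfs_rules() and
hclge_del_all_fd_entries() now expect the lock to be held, and the aRFS path
checks fd_active_type under the same acquisition that inserts the rule. Below
is a minimal pthread sketch of that check-and-modify-under-one-lock pattern;
all names are hypothetical, and it is an analogy, not the driver's code
(build with -pthread).

/* Illustration only: do the check/clear and the update under a single
 * lock acquisition, as the hclge patches do with fd_rule_lock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rule_lock = PTHREAD_MUTEX_INITIALIZER;

enum active_type { FD_NONE, FD_ETHTOOL, FD_ARFS };
static enum active_type active = FD_NONE;
static int nrules;

/* like hclge_clear_arfs_rules() after the patch: caller holds the lock */
static void clear_rules_locked(void)
{
	nrules = 0;
	active = FD_NONE;
}

static void add_ethtool_rule(void)
{
	/* lock first, then clear conflicting aRFS rules and insert;
	 * nothing can slip in between the clear and the insert */
	pthread_mutex_lock(&rule_lock);
	clear_rules_locked();
	nrules++;
	active = FD_ETHTOOL;
	pthread_mutex_unlock(&rule_lock);
}

static int add_arfs_rule(void)
{
	int ret = 0;

	/* the "user rules present?" check sits under the same lock
	 * as the insert, closing the check-then-act race; in the
	 * driver these two paths run in different contexts */
	pthread_mutex_lock(&rule_lock);
	if (active == FD_ETHTOOL) {
		ret = -1;	/* aRFS must not touch user-configured rules */
	} else {
		nrules++;
		active = FD_ARFS;
	}
	pthread_mutex_unlock(&rule_lock);
	return ret;
}

int main(void)
{
	add_ethtool_rule();
	printf("arfs add after ethtool rule -> %d (expect -1)\n",
	       add_arfs_rule());
	return 0;
}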

drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c

@@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 	if (proto != htons(ETH_P_8021Q))
 		return -EPROTONOSUPPORT;
 
-	/* When device is resetting, firmware is unable to handle
-	 * mailbox. Just record the vlan id, and remove it after
+	/* When device is resetting or reset failed, firmware is unable to
+	 * handle mailbox. Just record the vlan id, and remove it after
 	 * reset finished.
 	 */
-	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
 		return -EBUSY;
 	}
@@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
 {
 	struct hnae3_handle *nic = &hdev->nic;
 	struct hclge_vf_to_pf_msg send_msg;
+	int ret;
 
 	rtnl_lock();
-	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
-	rtnl_unlock();
+
+	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
+		dev_warn(&hdev->pdev->dev,
+			 "is resetting when updating port based vlan info\n");
+		rtnl_unlock();
+		return;
+	}
+
+	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret) {
+		rtnl_unlock();
+		return;
+	}
 
 	/* send msg to PF and wait update port based vlan info */
 	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
 			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
 	memcpy(send_msg.data, port_base_vlan_info, data_size);
-	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
-
-	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
-		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
-	else
-		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+	if (!ret) {
+		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+			nic->port_base_vlan_state = state;
+		else
+			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+	}
 
-	rtnl_lock();
 	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
 	rtnl_unlock();
 }
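
The hclgevf_update_port_base_vlan_info() rework above follows a
commit-on-success pattern: the VF bails out early while a reset is in flight,
and writes nic->port_base_vlan_state only after hclgevf_send_mbx_msg()
returns success, so a failed mailbox no longer leaves the cached state out of
sync with the PF. A minimal, compilable sketch of that pattern follows; the
names are hypothetical stand-ins, not the driver's API.

/* Illustration only: commit cached state only after the send succeeds,
 * mirroring the hclgevf_update_port_base_vlan_info() change above.
 */
#include <stdio.h>

enum vlan_state { PORT_VLAN_DISABLE, PORT_VLAN_ENABLE };

static enum vlan_state cached_state = PORT_VLAN_DISABLE;

/* stand-in for the PF mailbox call; returns 0 on success */
static int send_to_pf(int simulate_failure)
{
	return simulate_failure ? -1 : 0;
}

static void update_port_vlan(enum vlan_state new_state, int simulate_failure)
{
	/* before the fix the state was written unconditionally, so a
	 * failed mailbox left the cache out of sync with the PF */
	if (!send_to_pf(simulate_failure))
		cached_state = new_state;
}

int main(void)
{
	update_port_vlan(PORT_VLAN_ENABLE, 1);	/* send fails */
	printf("after failed send: %d (still DISABLE)\n", cached_state);

	update_port_vlan(PORT_VLAN_ENABLE, 0);	/* send succeeds */
	printf("after good send:   %d (now ENABLE)\n", cached_state);
	return 0;
}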