Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2018-05-17

This series contains updates to the ixgbe, ixgbevf and ice drivers.

Cathy Zhou resolves sparse warnings by using the __force attribute.

Mauro S M Rodrigues fixes a bug in PCI error recovery: when the recovery system opted to remove the device, ixgbe_io_error_detected() returned PCI_ERS_RESULT_DISCONNECT before calling ixgbe_close_suspend(), so IRQs were never freed and the driver crashed when the remove handler called pci_disable_device(). This is resolved by calling ixgbe_close_suspend() before evaluating the PCI channel state.

Pavel Tatashin releases the rtnl_lock during the call to ixgbe_close_suspend() to allow scaling when device_shutdown() is multi-threaded.

Emil modifies ixgbe to not validate the MAC address during a reset unless the MAC was set on the host, so that the VF gets a new MAC address every time it reloads. He also updates ixgbevf to set hw->mac.perm_addr so that a custom MAC is retained across a reset.

Anirudh updates the ice NVM read/erase/update AQ commands to align with the latest specification.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 56a9a9e737
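Most of the ixgbe/ixgbevf hunks below silence sparse byte-order warnings with __force casts rather than changing behavior. The recurring pattern, as a minimal standalone sketch (the register-write helper here is hypothetical, not a driver API):

	#include <linux/io.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Hypothetical helper: the register takes raw bits as a plain u32. */
	static void hw_write_raw(void __iomem *reg, u32 val)
	{
		writel(val, reg);
	}

	static void store_be_word(void __iomem *reg, u32 host_val)
	{
		__be32 wire = cpu_to_be32(host_val);

		/* The byte swap is intentional: the device wants big-endian
		 * bits in a u32-typed register. (__force u32) tells sparse
		 * the endianness-type conversion is deliberate rather than
		 * a bug; it does not change the generated code.
		 */
		hw_write_raw(reg, (__force u32)wire);
	}

The cast only affects sparse's type checking; the compiled object is identical with or without it.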
@@ -1049,7 +1049,9 @@ struct ice_aqc_set_event_mask {
  * NVM Update commands (indirect 0x0703)
  */
 struct ice_aqc_nvm {
-	u8 cmd_flags;
+	__le16 offset_low;
+	u8 offset_high;
+	u8 cmd_flags;
 #define ICE_AQC_NVM_LAST_CMD		BIT(0)
 #define ICE_AQC_NVM_PCIR_REQ		BIT(0)	/* Used by NVM Update reply */
 #define ICE_AQC_NVM_PRESERVATION_S	1
@@ -1058,12 +1060,11 @@ struct ice_aqc_nvm {
 #define ICE_AQC_NVM_PRESERVE_ALL	BIT(1)
 #define ICE_AQC_NVM_PRESERVE_SELECTED	(3 << CSR_AQ_NVM_PRESERVATION_S)
 #define ICE_AQC_NVM_FLASH_ONLY		BIT(7)
-	u8 module_typeid;
-	__le16 length;
+	__le16 module_typeid;
+	__le16 length;
 #define ICE_AQC_NVM_ERASE_LEN	0xFFFF
-	__le32 offset;
 	__le32 addr_high;
 	__le32 addr_low;
 };
 
 /* Get/Set RSS key (indirect 0x0B04/0x0B02) */
@@ -16,7 +16,7 @@
  * Read the NVM using the admin queue commands (0x0701)
  */
 static enum ice_status
-ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length,
+ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
 		void *data, bool last_command, struct ice_sq_cd *cd)
 {
 	struct ice_aq_desc desc;
@@ -33,8 +33,9 @@ ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length,
 	/* If this is the last command in a series, set the proper flag. */
 	if (last_command)
 		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
-	cmd->module_typeid = module_typeid;
-	cmd->offset = cpu_to_le32(offset);
+	cmd->module_typeid = cpu_to_le16(module_typeid);
+	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+	cmd->offset_high = (offset >> 16) & 0xFF;
 	cmd->length = cpu_to_le16(length);
 
 	return ice_aq_send_cmd(hw, &desc, data, length, cd);
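The updated descriptor carries a 24-bit NVM offset split across offset_low (16 bits, little-endian) and offset_high (8 bits), which is exactly what the new ice_aq_read_nvm() does. A self-contained sketch of the split and its inverse (struct and helper names are illustrative, not from the driver):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Illustrative mirror of the offset fields in struct ice_aqc_nvm. */
	struct nvm_offset {
		__le16 offset_low;	/* offset bits 0-15, little-endian */
		u8 offset_high;		/* offset bits 16-23 */
	};

	static void nvm_offset_pack(struct nvm_offset *o, u32 offset)
	{
		o->offset_low = cpu_to_le16(offset & 0xFFFF);
		o->offset_high = (offset >> 16) & 0xFF;
	}

	static u32 nvm_offset_unpack(const struct nvm_offset *o)
	{
		return ((u32)o->offset_high << 16) | le16_to_cpu(o->offset_low);
	}

Offsets above 0xFFFFFF do not fit in 24 bits; a real caller would have to reject them before packing.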
@@ -1436,7 +1436,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 {
 
 	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
-	u32 bucket_hash = 0, hi_dword = 0;
+	u32 bucket_hash = 0;
+	__be32 hi_dword = 0;
 	int i;
 
 	/* Apply masks to input data */
@@ -1475,7 +1476,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 	 * Limit hash to 13 bits since max bucket count is 8K.
 	 * Store result at the end of the input stream.
 	 */
-	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+	input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
 }
 
 /**
@@ -1584,7 +1585,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 		return IXGBE_ERR_CONFIG;
 	}
 
-	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+	switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
 	case 0x0000:
 		/* Mask Flex Bytes */
 		fdirm |= IXGBE_FDIRM_FLEX;
@@ -1654,13 +1655,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
 
 	/* record vlan (little-endian) and flex_bytes(big-endian) */
-	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+	fdirvlan = IXGBE_STORE_AS_BE16((__force u16)input->formatted.flex_bytes);
 	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
 	fdirvlan |= ntohs(input->formatted.vlan_id);
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
 
 	/* configure FDIRHASH register */
-	fdirhash = input->formatted.bkt_hash;
+	fdirhash = (__force u32)input->formatted.bkt_hash;
 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
 
@@ -1698,7 +1699,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
 	s32 err;
 
 	/* configure FDIRHASH register */
-	fdirhash = input->formatted.bkt_hash;
+	fdirhash = (__force u32)input->formatted.bkt_hash;
 	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
 
@@ -3626,7 +3626,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
 	 */
 	for (i = 0; i < dword_len; i++)
 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
-				      i, cpu_to_le32(buffer[i]));
+				      i, (__force u32)cpu_to_le32(buffer[i]));
 
 	/* Setting this bit tells the ARC that a new command is pending. */
 	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -440,7 +440,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
 		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
 			     ddp->sgc, DMA_FROM_DEVICE);
-		ddp->err = ddp_err;
+		ddp->err = (__force u32)ddp_err;
 		ddp->sgl = NULL;
 		ddp->sgc = 0;
 		/* fall through */
@@ -19,8 +19,9 @@ static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
 	int i;
 
 	for (i = 0; i < 4; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
-	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
+		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
+				(__force u32)cpu_to_be32(key[3 - i]));
+	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
 	IXGBE_WRITE_FLUSH(hw);
 
 	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
@@ -69,7 +70,8 @@ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
 	int i;
 
 	/* store the SPI (in bigendian) and IPidx */
-	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
+	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
+			(__force u32)cpu_to_le32((__force u32)spi));
 	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
 	IXGBE_WRITE_FLUSH(hw);
 
@@ -77,8 +79,9 @@ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
 
 	/* store the key, salt, and mode */
 	for (i = 0; i < 4; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
-	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
+				(__force u32)cpu_to_be32(key[3 - i]));
+	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
 	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
 	IXGBE_WRITE_FLUSH(hw);
 
@@ -97,7 +100,8 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
 
 	/* store the ip address */
 	for (i = 0; i < 4; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
+		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
+				(__force u32)cpu_to_le32((__force u32)addr[i]));
 	IXGBE_WRITE_FLUSH(hw);
 
 	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
@@ -367,7 +371,8 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
 	struct xfrm_state *ret = NULL;
 
 	rcu_read_lock();
-	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
+	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
+				   (__force u32)spi) {
 		if (spi == rsa->xs->id.spi &&
 		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
 		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
@@ -377,6 +382,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
 			xfrm_state_hold(ret);
 			break;
 		}
+	}
 	rcu_read_unlock();
 	return ret;
 }
@@ -569,7 +575,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 
 		/* hash the new entry for faster search in Rx path */
 		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
-			     rsa.xs->id.spi);
+			     (__force u64)rsa.xs->id.spi);
 	} else {
 		struct tx_sa tsa;
 
@@ -653,7 +659,8 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
 			if (!ipsec->ip_tbl[ipi].ref_cnt) {
 				memset(&ipsec->ip_tbl[ipi], 0,
 				       sizeof(struct rx_ip_sa));
-				ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
+				ixgbe_ipsec_set_rx_ip(hw, ipi,
+						      (__force __be32 *)zerobuf);
 			}
 		}
 
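The ixgbe_ipsec_find_rx_state() change also adds braces because the macro invocation now spans two lines; the hash key itself is the raw SPI bits with the __be32 annotation cast away. A self-contained sketch of keying a kernel hashtable by a big-endian SPI (an illustration, not the driver's actual tables):

	#include <linux/hashtable.h>
	#include <linux/types.h>

	struct sa_entry {
		struct hlist_node hlist;
		__be32 spi;
	};

	static DEFINE_HASHTABLE(sa_table, 8);

	/* hash_add_rcu()/hash_for_each_possible_rcu() take an integer key, so
	 * the __be32 SPI is passed as raw bits via __force. That is safe here
	 * because insert and lookup apply the same (non-)conversion, and the
	 * exact match is still verified against the typed field.
	 */
	static void sa_insert(struct sa_entry *e)
	{
		hash_add_rcu(sa_table, &e->hlist, (__force u32)e->spi);
	}

	static struct sa_entry *sa_find(__be32 spi)
	{
		struct sa_entry *e;

		hash_for_each_possible_rcu(sa_table, e, hlist, (__force u32)spi) {
			if (e->spi == spi)
				return e;
		}
		return NULL;
	}

Lookup would run under rcu_read_lock(), as the driver's find path does.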
@@ -727,8 +727,8 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 			ring_desc = "";
 		pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s",
 			i,
-			le64_to_cpu(u0->a),
-			le64_to_cpu(u0->b),
+			le64_to_cpu((__force __le64)u0->a),
+			le64_to_cpu((__force __le64)u0->b),
 			(u64)dma_unmap_addr(tx_buffer, dma),
 			dma_unmap_len(tx_buffer, len),
 			tx_buffer->next_to_watch,
@@ -839,15 +839,15 @@ rx_ring_summary:
 			/* Descriptor Done */
 			pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n",
 				i,
-				le64_to_cpu(u0->a),
-				le64_to_cpu(u0->b),
+				le64_to_cpu((__force __le64)u0->a),
+				le64_to_cpu((__force __le64)u0->b),
 				rx_buffer_info->skb,
 				ring_desc);
 		} else {
 			pr_info("R  [0x%03X] %016llX %016llX %016llX %p%s\n",
 				i,
-				le64_to_cpu(u0->a),
-				le64_to_cpu(u0->b),
+				le64_to_cpu((__force __le64)u0->a),
+				le64_to_cpu((__force __le64)u0->b),
 				(u64)rx_buffer_info->dma,
 				rx_buffer_info->skb,
 				ring_desc);
@@ -6698,8 +6698,15 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	rtnl_lock();
 	netif_device_detach(netdev);
 
-	if (netif_running(netdev))
+	if (netif_running(netdev)) {
+		/* Suspend takes a long time, device_shutdown may be
+		 * parallelized this function, so drop lock for the
+		 * duration of this call.
+		 */
+		rtnl_unlock();
 		ixgbe_close_suspend(adapter);
+		rtnl_lock();
+	}
 
 	ixgbe_clear_interrupt_scheme(adapter);
 	rtnl_unlock();
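The __ixgbe_shutdown() hunk above drops rtnl_lock around the slow close so a multi-threaded device_shutdown() is not serialized behind one NIC. The pattern reduced to its shape (slow_close() is a stand-in for ixgbe_close_suspend()):

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	static void slow_close(struct net_device *netdev);	/* stand-in */

	static void shutdown_one(struct net_device *netdev)
	{
		rtnl_lock();
		netif_device_detach(netdev);

		if (netif_running(netdev)) {
			/* Release the global lock for the long-running call;
			 * anything read under rtnl before the unlock must be
			 * treated as stale after relocking.
			 */
			rtnl_unlock();
			slow_close(netdev);
			rtnl_lock();
		}

		rtnl_unlock();
	}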
@@ -7751,7 +7758,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 
 	/* remove payload length from inner checksum */
 	paylen = skb->len - l4_offset;
-	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
 
 	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -9104,7 +9111,8 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
 
 		for (j = 0; field_ptr[j].val; j++) {
 			if (field_ptr[j].off == off) {
-				field_ptr[j].val(input, mask, val, m);
+				field_ptr[j].val(input, mask, (__force u32)val,
+						 (__force u32)m);
 				input->filter.formatted.flow_type |=
 					field_ptr[j].type;
 				found_entry = true;
@@ -9113,8 +9121,10 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
 		}
 		if (nexthdr) {
 			if (nexthdr->off == cls->knode.sel->keys[i].off &&
-			    nexthdr->val == cls->knode.sel->keys[i].val &&
-			    nexthdr->mask == cls->knode.sel->keys[i].mask)
+			    nexthdr->val ==
+			    (__force u32)cls->knode.sel->keys[i].val &&
+			    nexthdr->mask ==
+			    (__force u32)cls->knode.sel->keys[i].mask)
 				found_jump_field = true;
 			else
 				continue;
@@ -9218,7 +9228,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
 		for (i = 0; nexthdr[i].jump; i++) {
 			if (nexthdr[i].o != cls->knode.sel->offoff ||
 			    nexthdr[i].s != cls->knode.sel->offshift ||
-			    nexthdr[i].m != cls->knode.sel->offmask)
+			    nexthdr[i].m !=
+			    (__force u32)cls->knode.sel->offmask)
 				return err;
 
 			jump = kzalloc(sizeof(*jump), GFP_KERNEL);
@@ -9991,7 +10002,8 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 		}
 	} else {
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
+			(void)xchg(&adapter->rx_ring[i]->xdp_prog,
+				   adapter->xdp_prog);
 	}
 
 	if (old_prog)
@@ -10930,14 +10942,14 @@ skip_bad_vf_detection:
 	rtnl_lock();
 	netif_device_detach(netdev);
 
+	if (netif_running(netdev))
+		ixgbe_close_suspend(adapter);
+
 	if (state == pci_channel_io_perm_failure) {
 		rtnl_unlock();
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
-	if (netif_running(netdev))
-		ixgbe_close_suspend(adapter);
-
 	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
 		pci_disable_device(pdev);
 	rtnl_unlock();
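The skip_bad_vf_detection hunk reorders the error handler so IRQs are freed before the permanent-failure early return; previously PCI_ERS_RESULT_DISCONNECT was returned first, and the subsequent remove path called pci_disable_device() with IRQs still requested. A condensed sketch of the corrected shape (close_suspend() stands in for ixgbe_close_suspend(); the drvdata layout is simplified relative to the driver):

	#include <linux/netdevice.h>
	#include <linux/pci.h>
	#include <linux/rtnetlink.h>

	static void close_suspend(struct net_device *netdev);	/* stand-in */

	static pci_ers_result_t io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
	{
		struct net_device *netdev = pci_get_drvdata(pdev);

		rtnl_lock();
		netif_device_detach(netdev);

		/* Free IRQs and stop the queues before any early return... */
		if (netif_running(netdev))
			close_suspend(netdev);

		if (state == pci_channel_io_perm_failure) {
			/* ...so a later remove + pci_disable_device() never
			 * runs with IRQs still requested.
			 */
			rtnl_unlock();
			return PCI_ERS_RESULT_DISCONNECT;
		}

		rtnl_unlock();
		return PCI_ERS_RESULT_NEED_RESET;
	}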
@@ -29,8 +29,8 @@ static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
 				     union ixgbe_atr_input *mask,
 				     u32 val, u32 m)
 {
-	input->filter.formatted.src_ip[0] = val;
-	mask->formatted.src_ip[0] = m;
+	input->filter.formatted.src_ip[0] = (__force __be32)val;
+	mask->formatted.src_ip[0] = (__force __be32)m;
 	return 0;
 }
 
@@ -38,8 +38,8 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
 				     union ixgbe_atr_input *mask,
 				     u32 val, u32 m)
 {
-	input->filter.formatted.dst_ip[0] = val;
-	mask->formatted.dst_ip[0] = m;
+	input->filter.formatted.dst_ip[0] = (__force __be32)val;
+	mask->formatted.dst_ip[0] = (__force __be32)m;
 	return 0;
 }
 
@@ -55,10 +55,10 @@ static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
 				       union ixgbe_atr_input *mask,
 				       u32 val, u32 m)
 {
-	input->filter.formatted.src_port = val & 0xffff;
-	mask->formatted.src_port = m & 0xffff;
-	input->filter.formatted.dst_port = val >> 16;
-	mask->formatted.dst_port = m >> 16;
+	input->filter.formatted.src_port = (__force __be16)(val & 0xffff);
+	mask->formatted.src_port = (__force __be16)(m & 0xffff);
+	input->filter.formatted.dst_port = (__force __be16)(val >> 16);
+	mask->formatted.dst_port = (__force __be16)(m >> 16);
 
 	return 0;
 };
@@ -854,14 +854,11 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 
 	/* reply to reset with ack and vf mac address */
 	msgbuf[0] = IXGBE_VF_RESET;
-	if (!is_zero_ether_addr(vf_mac)) {
+	if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) {
 		msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
 		memcpy(addr, vf_mac, ETH_ALEN);
 	} else {
 		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
-		dev_warn(&adapter->pdev->dev,
-			 "VF %d has no MAC address assigned, you may have to assign one manually\n",
-			 vf);
 	}
 
 	/*
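With the added pf_set_mac test, the PF NACKs the reset-time MAC reply unless the host administrator explicitly assigned a MAC, and the VF then generates a fresh random address on reload instead of reusing a stale one. An illustrative VF-side reaction (the message flag name is made up; ixgbevf's real mailbox handling differs):

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	#define VF_MSG_ACK	0x1	/* illustrative flag, not a driver define */

	static void vf_apply_reset_reply(struct net_device *netdev, u32 msg,
					 const u8 *mac_from_pf)
	{
		if (msg & VF_MSG_ACK)
			/* Host pinned a MAC for this VF: adopt it. */
			ether_addr_copy(netdev->dev_addr, mac_from_pf);
		else
			/* No host-assigned MAC: pick a fresh random one. */
			eth_hw_addr_random(netdev);
	}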
@@ -878,8 +878,9 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
 
 	/* convert offset from words to bytes */
-	buffer.address = cpu_to_be32((offset + current_word) * 2);
-	buffer.length = cpu_to_be16(words_to_read * 2);
+	buffer.address = (__force u32)cpu_to_be32((offset +
+						   current_word) * 2);
+	buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);
 	buffer.pad2 = 0;
 	buffer.pad3 = 0;
 
@@ -1089,9 +1090,9 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
 
 	/* convert offset from words to bytes */
-	buffer.address = cpu_to_be32(offset * 2);
+	buffer.address = (__force u32)cpu_to_be32(offset * 2);
 	/* one word */
-	buffer.length = cpu_to_be16(sizeof(u16));
+	buffer.length = (__force u16)cpu_to_be16(sizeof(u16));
 
 	status = hw->mac.ops.acquire_swfw_sync(hw, mask);
 	if (status)
@@ -4164,6 +4164,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 		return -EPERM;
 
 	ether_addr_copy(hw->mac.addr, addr->sa_data);
+	ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
 	return 0;
@@ -4747,14 +4748,14 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
 	rtnl_lock();
 	netif_device_detach(netdev);
 
+	if (netif_running(netdev))
+		ixgbevf_close_suspend(adapter);
+
 	if (state == pci_channel_io_perm_failure) {
 		rtnl_unlock();
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
 
-	if (netif_running(netdev))
-		ixgbevf_close_suspend(adapter);
-
 	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
 		pci_disable_device(pdev);
 	rtnl_unlock();