Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next
Commit 10d1e8ca8d
@@ -1,5 +1,5 @@
 config SFC
-	tristate "Solarflare Solarstorm SFC4000/SFC9000-family support"
+	tristate "Solarflare SFC4000/SFC9000-family support"
 	depends on PCI && INET
 	select MDIO
 	select CRC32
@@ -7,13 +7,12 @@ config SFC
 	select I2C_ALGOBIT
 	help
 	  This driver supports 10-gigabit Ethernet cards based on
-	  the Solarflare Communications Solarstorm SFC4000 and
-	  SFC9000-family controllers.
+	  the Solarflare SFC4000 and SFC9000-family controllers.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called sfc.
 config SFC_MTD
-	bool "Solarflare Solarstorm SFC4000/SFC9000-family MTD support"
+	bool "Solarflare SFC4000/SFC9000-family MTD support"
 	depends on SFC && MTD && !(SFC=y && MTD=m)
 	default y
 	help

@@ -229,8 +229,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
 	struct efx_nic *efx = channel->efx;
 	int spent;
 
-	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
-		     !channel->enabled))
+	if (unlikely(efx->reset_pending || !channel->enabled))
 		return 0;
 
 	spent = efx_nic_process_eventq(channel, budget);
@@ -1461,7 +1460,7 @@ static void efx_start_all(struct efx_nic *efx)
 	 * reset_pending [modified from an atomic context], we instead guarantee
 	 * that efx_mcdi_mode_poll() isn't reverted erroneously */
 	efx_mcdi_mode_event(efx);
-	if (efx->reset_pending != RESET_TYPE_NONE)
+	if (efx->reset_pending)
 		efx_mcdi_mode_poll(efx);
 
 	/* Start the hardware monitor if there is one. Otherwise (we're link
@@ -2118,8 +2117,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
 		goto out;
 	}
 
-	/* Allow resets to be rescheduled. */
-	efx->reset_pending = RESET_TYPE_NONE;
+	/* Clear flags for the scopes we covered. We assume the NIC and
+	 * driver are now quiescent so that there is no race here.
+	 */
+	efx->reset_pending &= -(1 << (method + 1));
 
 	/* Reinitialise bus-mastering, which may have been turned off before
 	 * the reset was scheduled. This is still appropriate, even in the
@@ -2154,12 +2155,13 @@ out:
 static void efx_reset_work(struct work_struct *data)
 {
 	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+	unsigned long pending = ACCESS_ONCE(efx->reset_pending);
 
-	if (efx->reset_pending == RESET_TYPE_NONE)
+	if (!pending)
 		return;
 
 	/* If we're not RUNNING then don't reset. Leave the reset_pending
-	 * flag set so that efx_pci_probe_main will be retried */
+	 * flags set so that efx_pci_probe_main will be retried */
 	if (efx->state != STATE_RUNNING) {
 		netif_info(efx, drv, efx->net_dev,
 			   "scheduled reset quenched. NIC not RUNNING\n");
@@ -2167,7 +2169,7 @@ static void efx_reset_work(struct work_struct *data)
 	}
 
 	rtnl_lock();
-	(void)efx_reset(efx, efx->reset_pending);
+	(void)efx_reset(efx, fls(pending) - 1);
 	rtnl_unlock();
 }
 
@@ -2175,40 +2177,24 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 {
 	enum reset_type method;
 
-	if (efx->reset_pending != RESET_TYPE_NONE) {
-		netif_info(efx, drv, efx->net_dev,
-			   "quenching already scheduled reset\n");
-		return;
-	}
-
 	switch (type) {
 	case RESET_TYPE_INVISIBLE:
 	case RESET_TYPE_ALL:
 	case RESET_TYPE_WORLD:
 	case RESET_TYPE_DISABLE:
 		method = type;
+		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+			  RESET_TYPE(method));
 		break;
-	case RESET_TYPE_RX_RECOVERY:
-	case RESET_TYPE_RX_DESC_FETCH:
-	case RESET_TYPE_TX_DESC_FETCH:
-	case RESET_TYPE_TX_SKIP:
-		method = RESET_TYPE_INVISIBLE;
-		break;
-	case RESET_TYPE_MC_FAILURE:
 	default:
-		method = RESET_TYPE_ALL;
-		break;
-	}
-
-	if (method != type)
+		method = efx->type->map_reset_reason(type);
 		netif_dbg(efx, drv, efx->net_dev,
 			  "scheduling %s reset for %s\n",
 			  RESET_TYPE(method), RESET_TYPE(type));
-	else
-		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
-			  RESET_TYPE(method));
+		break;
+	}
 
-	efx->reset_pending = method;
+	set_bit(method, &efx->reset_pending);
 
 	/* efx_process_channel() will no longer read events once a
 	 * reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2288,7 +2274,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
 	efx->pci_dev = pci_dev;
 	efx->msg_enable = debug;
 	efx->state = STATE_INIT;
-	efx->reset_pending = RESET_TYPE_NONE;
 	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
 
 	efx->net_dev = net_dev;
@@ -2491,7 +2476,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 		goto fail1;
 
 	netif_info(efx, probe, efx->net_dev,
-		   "Solarflare Communications NIC detected\n");
+		   "Solarflare NIC detected\n");
 
 	/* Set up basic I/O (BAR mappings etc) */
 	rc = efx_init_io(efx);
@@ -2510,7 +2495,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	cancel_work_sync(&efx->reset_work);
 
 	if (rc == 0) {
-		if (efx->reset_pending != RESET_TYPE_NONE) {
+		if (efx->reset_pending) {
 			/* If there was a scheduled reset during
 			 * probe, the NIC is probably hosed anyway */
 			efx_pci_remove_main(efx);
@@ -2521,11 +2506,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 		}
 
 		/* Retry if a recoverably reset event has been scheduled */
-		if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
-		    (efx->reset_pending != RESET_TYPE_ALL))
+		if (efx->reset_pending &
+		    ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
+		    !efx->reset_pending)
 			goto fail3;
 
-		efx->reset_pending = RESET_TYPE_NONE;
+		efx->reset_pending = 0;
 	}
 
 	if (rc) {
@@ -2609,7 +2595,7 @@ static int efx_pm_poweroff(struct device *dev)
 
 	efx->type->fini(efx);
 
-	efx->reset_pending = RESET_TYPE_NONE;
+	efx->reset_pending = 0;
 
 	pci_save_state(pci_dev);
 	return pci_set_power_state(pci_dev, PCI_D3hot);

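Note (not part of the commit): the hunks above are from the driver core (efx.c) and convert efx->reset_pending from a single enum value into a bitmask of pending reset methods: efx_schedule_reset() sets the bit for the mapped method with set_bit(), efx_reset_work() services the widest pending scope via fls(pending) - 1, and efx_reset() then clears that scope and every narrower one with reset_pending &= -(1 << (method + 1)). A minimal userspace sketch of those three bit operations, with plain C stand-ins for the kernel's set_bit()/fls() (illustration only, not driver code):

	#include <assert.h>
	#include <stdio.h>

	/* Stand-ins for the reset scopes; as in the driver's enum reset_type,
	 * they must be numbered in order of increasing scope. */
	enum { INVISIBLE = 0, ALL = 1, WORLD = 2, DISABLE = 3 };

	static unsigned long pending;	/* bitmask of scheduled methods */

	static void schedule_reset(int method)
	{
		pending |= 1UL << method;	/* the driver uses set_bit() */
	}

	/* Widest scope still pending; equivalent to fls(pending) - 1. */
	static int next_method(void)
	{
		int m = -1;
		unsigned long p;

		for (p = pending; p; p >>= 1)
			m++;
		return m;
	}

	/* After performing 'method', drop it and every narrower scope it
	 * covered: -(1UL << (method + 1)) has only the bits above 'method'
	 * set, so the AND keeps just wider, still-unserviced requests. */
	static void reset_done(int method)
	{
		pending &= -(1UL << (method + 1));
	}

	int main(void)
	{
		schedule_reset(INVISIBLE);
		schedule_reset(WORLD);
		assert(next_method() == WORLD);	/* widest request wins */
		reset_done(WORLD);		/* ...and also covers INVISIBLE */
		assert(pending == 0);
		printf("all pending resets serviced\n");
		return 0;
	}

An empty mask now means "nothing scheduled", which is why RESET_TYPE_NONE disappears from the reset_type enum in the next hunks.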
@@ -134,6 +134,8 @@ enum efx_loopback_mode {
  * other valuesspecify reasons, which efx_schedule_reset() will choose
  * a method for.
  *
+ * Reset methods are numbered in order of increasing scope.
+ *
  * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
  * @RESET_TYPE_ALL: reset everything but PCI core blocks
  * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
@@ -147,7 +149,6 @@ enum efx_loopback_mode {
  * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
  */
 enum reset_type {
-	RESET_TYPE_NONE = -1,
 	RESET_TYPE_INVISIBLE = 0,
 	RESET_TYPE_ALL = 1,
 	RESET_TYPE_WORLD = 2,

@@ -796,30 +796,13 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
 static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	enum reset_type method;
-	enum {
-		ETH_RESET_EFX_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
-					   ETH_RESET_OFFLOAD | ETH_RESET_MAC)
-	};
+	int rc;
 
-	/* Check for minimal reset flags */
-	if ((*flags & ETH_RESET_EFX_INVISIBLE) != ETH_RESET_EFX_INVISIBLE)
-		return -EINVAL;
-	*flags ^= ETH_RESET_EFX_INVISIBLE;
-	method = RESET_TYPE_INVISIBLE;
+	rc = efx->type->map_reset_flags(flags);
+	if (rc < 0)
+		return rc;
 
-	if (*flags & ETH_RESET_PHY) {
-		*flags ^= ETH_RESET_PHY;
-		method = RESET_TYPE_ALL;
-	}
-
-	if ((*flags & efx->type->reset_world_flags) ==
-	    efx->type->reset_world_flags) {
-		*flags ^= efx->type->reset_world_flags;
-		method = RESET_TYPE_WORLD;
-	}
-
-	return efx_reset(efx, method);
+	return efx_reset(efx, rc);
 }
 
 static int

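Note (not part of the commit): efx_ethtool_reset() above no longer decodes the ETH_RESET_* bits itself; it calls the new per-NIC-type map_reset_flags() hook, which consumes the flag bits it will honour, leaves the rest in *flags for the ethtool core to report back, and returns the chosen reset method (or a negative errno). A rough userspace sketch of that contract, using hypothetical flag values rather than the real ETH_RESET_* constants and mirroring the shape of falcon_map_reset_flags() added further down:

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for a few ETH_RESET_* component bits. */
	#define RST_DMA		0x01
	#define RST_FILTER	0x02
	#define RST_MAC		0x04
	#define RST_PHY		0x08
	#define RST_IRQ		0x10

	enum reset_type { RESET_INVISIBLE, RESET_ALL, RESET_WORLD };

	/* Check the widest scope first, strip the bits that scope covers,
	 * and return the method; leftover bits stay in *flags. */
	static int map_reset_flags(uint32_t *flags)
	{
		const uint32_t invisible = RST_DMA | RST_FILTER | RST_MAC;
		const uint32_t all = invisible | RST_PHY;
		const uint32_t world = all | RST_IRQ;

		if ((*flags & world) == world) {
			*flags &= ~world;
			return RESET_WORLD;
		}
		if ((*flags & all) == all) {
			*flags &= ~all;
			return RESET_ALL;
		}
		if ((*flags & invisible) == invisible) {
			*flags &= ~invisible;
			return RESET_INVISIBLE;
		}
		return -EINVAL;	/* not even the minimal scope was requested */
	}

	int main(void)
	{
		uint32_t flags = RST_DMA | RST_FILTER | RST_MAC | RST_PHY;
		int rc = map_reset_flags(&flags);

		/* rc is what efx_ethtool_reset() would pass to efx_reset();
		 * bits still set in 'flags' were not handled by this NIC. */
		printf("method=%d leftover=%#x\n", rc, (unsigned)flags);
		return 0;
	}
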
@@ -536,7 +536,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	efx_oword_t reg;
 	int link_speed, isolate;
 
-	isolate = (efx->reset_pending != RESET_TYPE_NONE);
+	isolate = !!ACCESS_ONCE(efx->reset_pending);
 
 	switch (link_state->speed) {
 	case 10000: link_speed = 3; break;
@@ -1051,6 +1051,49 @@ static int falcon_b0_test_registers(struct efx_nic *efx)
  **************************************************************************
  */
 
+static enum reset_type falcon_map_reset_reason(enum reset_type reason)
+{
+	switch (reason) {
+	case RESET_TYPE_RX_RECOVERY:
+	case RESET_TYPE_RX_DESC_FETCH:
+	case RESET_TYPE_TX_DESC_FETCH:
+	case RESET_TYPE_TX_SKIP:
+		/* These can occasionally occur due to hardware bugs.
+		 * We try to reset without disrupting the link.
+		 */
+		return RESET_TYPE_INVISIBLE;
+	default:
+		return RESET_TYPE_ALL;
+	}
+}
+
+static int falcon_map_reset_flags(u32 *flags)
+{
+	enum {
+		FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
+					  ETH_RESET_OFFLOAD | ETH_RESET_MAC),
+		FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
+		FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
+	};
+
+	if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
+		*flags &= ~FALCON_RESET_WORLD;
+		return RESET_TYPE_WORLD;
+	}
+
+	if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
+		*flags &= ~FALCON_RESET_ALL;
+		return RESET_TYPE_ALL;
+	}
+
+	if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
+		*flags &= ~FALCON_RESET_INVISIBLE;
+		return RESET_TYPE_INVISIBLE;
+	}
+
+	return -EINVAL;
+}
+
 /* Resets NIC to known state. This routine must be called in process
  * context and is allowed to sleep. */
 static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
@@ -1709,6 +1752,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.init = falcon_init_nic,
 	.fini = efx_port_dummy_op_void,
 	.monitor = falcon_monitor,
+	.map_reset_reason = falcon_map_reset_reason,
+	.map_reset_flags = falcon_map_reset_flags,
 	.reset = falcon_reset_hw,
 	.probe_port = falcon_probe_port,
 	.remove_port = falcon_remove_port,
@@ -1741,7 +1786,6 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.tx_dc_base = 0x130000,
 	.rx_dc_base = 0x100000,
 	.offload_features = NETIF_F_IP_CSUM,
-	.reset_world_flags = ETH_RESET_IRQ,
 };
 
 const struct efx_nic_type falcon_b0_nic_type = {
@@ -1750,6 +1794,8 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.init = falcon_init_nic,
 	.fini = efx_port_dummy_op_void,
 	.monitor = falcon_monitor,
+	.map_reset_reason = falcon_map_reset_reason,
+	.map_reset_flags = falcon_map_reset_flags,
 	.reset = falcon_reset_hw,
 	.probe_port = falcon_probe_port,
 	.remove_port = falcon_remove_port,
@@ -1791,6 +1837,5 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.tx_dc_base = 0x130000,
 	.rx_dc_base = 0x100000,
 	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
-	.reset_world_flags = ETH_RESET_IRQ,
 };
 
@@ -335,28 +335,35 @@ static int efx_filter_search(struct efx_filter_table *table,
 			     bool for_insert, int *depth_required)
 {
 	unsigned hash, incr, filter_idx, depth, depth_max;
-	struct efx_filter_spec *cmp;
 
 	hash = efx_filter_hash(key);
 	incr = efx_filter_increment(key);
-	depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
-		     FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
 
-	for (depth = 1, filter_idx = hash & (table->size - 1);
-	     depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
-	     ++depth) {
-		cmp = &table->spec[filter_idx];
-		if (efx_filter_equal(spec, cmp))
-			goto found;
+	filter_idx = hash & (table->size - 1);
+	depth = 1;
+	depth_max = (for_insert ?
+		     (spec->priority <= EFX_FILTER_PRI_HINT ?
+		      FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
+		     table->search_depth[spec->type]);
+
+	for (;;) {
+		/* Return success if entry is used and matches this spec
+		 * or entry is unused and we are trying to insert.
+		 */
+		if (test_bit(filter_idx, table->used_bitmap) ?
+		    efx_filter_equal(spec, &table->spec[filter_idx]) :
+		    for_insert) {
+			*depth_required = depth;
+			return filter_idx;
+		}
+
+		/* Return failure if we reached the maximum search depth */
+		if (depth == depth_max)
+			return for_insert ? -EBUSY : -ENOENT;
+
 		filter_idx = (filter_idx + incr) & (table->size - 1);
+		++depth;
 	}
-	if (!for_insert)
-		return -ENOENT;
-	if (depth > depth_max)
-		return -EBUSY;
-found:
-	*depth_required = depth;
-	return filter_idx;
 }
 
 /* Construct/deconstruct external filter IDs */
@@ -650,11 +657,11 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 		return -EPROTONOSUPPORT;
 
 	/* RFS must validate the IP header length before calling us */
-	EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
+	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
 	ip = (const struct iphdr *)(skb->data + nhoff);
 	if (ip_is_fragment(ip))
 		return -EPROTONOSUPPORT;
-	EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
+	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
 	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);

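Note (not part of the commit): the rewritten efx_filter_search() above folds lookup and insertion into a single probe loop over the open-addressed filter table: a slot terminates the search when it is in use and equal to the spec, or unused while inserting, and otherwise the probe advances by the key-derived increment until the depth limit is reached. A simplified userspace sketch of that probing scheme, with a toy table and hash in place of the driver's structures:

	#include <stdbool.h>
	#include <stdio.h>

	#define TABLE_SIZE 8	/* power of two, as in the driver */
	#define SEARCH_MAX 4	/* cap on probe depth */

	struct entry {
		bool used;
		int key;
	};

	static struct entry table[TABLE_SIZE];

	/* Toy hash and probe increment; the driver derives both from the
	 * filter key (efx_filter_hash()/efx_filter_increment()). */
	static unsigned hash_of(int key) { return (unsigned)key * 2654435761u; }
	static unsigned incr_of(int key) { return (unsigned)key | 1u; }

	/* Returns a slot index, or -1 on failure (the driver distinguishes
	 * -EBUSY for a full insert path from -ENOENT for a failed lookup). */
	static int search(int key, bool for_insert, int *depth_used)
	{
		unsigned idx = hash_of(key) & (TABLE_SIZE - 1);
		unsigned incr = incr_of(key);
		int depth = 1;

		for (;;) {
			/* Success: used entry that matches, or a free entry
			 * when we are trying to insert. */
			if (table[idx].used ? table[idx].key == key : for_insert) {
				*depth_used = depth;
				return (int)idx;
			}
			/* Failure: probe depth exhausted. */
			if (depth == SEARCH_MAX)
				return -1;
			idx = (idx + incr) & (TABLE_SIZE - 1);
			++depth;
		}
	}

	int main(void)
	{
		int depth, slot = search(42, true, &depth);

		if (slot >= 0) {
			table[slot].used = true;
			table[slot].key = 42;
			printf("inserted at slot %d, depth %d\n", slot, depth);
		}
		printf("lookup -> slot %d\n", search(42, false, &depth));
		return 0;
	}
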
@@ -644,7 +644,7 @@ struct efx_filter_state;
  * @irq_rx_moderation: IRQ moderation time for RX event queues
  * @msg_enable: Log message enable flags
  * @state: Device state flag. Serialised by the rtnl_lock.
- * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
+ * @reset_pending: Bitmask for pending resets
  * @tx_queue: TX DMA queues
  * @rx_queue: RX DMA queues
  * @channel: Channels
@@ -727,7 +727,7 @@ struct efx_nic {
 	u32 msg_enable;
 
 	enum nic_state state;
-	enum reset_type reset_pending;
+	unsigned long reset_pending;
 
 	struct efx_channel *channel[EFX_MAX_CHANNELS];
 	char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
@@ -827,6 +827,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
  * @init: Initialise the controller
  * @fini: Shut down the controller
  * @monitor: Periodic function for polling link state and hardware monitor
+ * @map_reset_reason: Map ethtool reset reason to a reset method
+ * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
  * @reset: Reset the controller hardware and possibly the PHY. This will
  *	be called while the controller is uninitialised.
  * @probe_port: Probe the MAC and PHY
@@ -864,8 +866,6 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
  * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
  * @offload_features: net_device feature flags for protocol offload
  *	features implemented in hardware
- * @reset_world_flags: Flags for additional components covered by
- *	reset method RESET_TYPE_WORLD
  */
 struct efx_nic_type {
 	int (*probe)(struct efx_nic *efx);
@@ -873,6 +873,8 @@ struct efx_nic_type {
 	int (*init)(struct efx_nic *efx);
 	void (*fini)(struct efx_nic *efx);
 	void (*monitor)(struct efx_nic *efx);
+	enum reset_type (*map_reset_reason)(enum reset_type reason);
+	int (*map_reset_flags)(u32 *flags);
 	int (*reset)(struct efx_nic *efx, enum reset_type method);
 	int (*probe_port)(struct efx_nic *efx);
 	void (*remove_port)(struct efx_nic *efx);
@@ -907,7 +909,6 @@ struct efx_nic_type {
 	unsigned int tx_dc_base;
 	unsigned int rx_dc_base;
 	u32 offload_features;
-	u32 reset_world_flags;
 };
 
 /**************************************************************************

@@ -177,6 +177,36 @@ static int siena_test_registers(struct efx_nic *efx)
  **************************************************************************
  */
 
+static enum reset_type siena_map_reset_reason(enum reset_type reason)
+{
+	return RESET_TYPE_ALL;
+}
+
+static int siena_map_reset_flags(u32 *flags)
+{
+	enum {
+		SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
+				    ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+				    ETH_RESET_PHY),
+		SIENA_RESET_MC = (SIENA_RESET_PORT |
+				  ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
+	};
+
+	if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
+		*flags &= ~SIENA_RESET_MC;
+		return RESET_TYPE_WORLD;
+	}
+
+	if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
+		*flags &= ~SIENA_RESET_PORT;
+		return RESET_TYPE_ALL;
+	}
+
+	/* no invisible reset implemented */
+
+	return -EINVAL;
+}
+
 static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
 {
 	int rc;
@@ -390,14 +420,13 @@ static void siena_remove_nic(struct efx_nic *efx)
 	efx->nic_data = NULL;
 }
 
-#define STATS_GENERATION_INVALID ((u64)(-1))
+#define STATS_GENERATION_INVALID ((__force __le64)(-1))
 
 static int siena_try_update_nic_stats(struct efx_nic *efx)
 {
-	u64 *dma_stats;
+	__le64 *dma_stats;
 	struct efx_mac_stats *mac_stats;
-	u64 generation_start;
-	u64 generation_end;
+	__le64 generation_start, generation_end;
 
 	mac_stats = &efx->mac_stats;
 	dma_stats = efx->stats_buffer.addr;
@@ -408,7 +437,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
 	rmb();
 
 #define MAC_STAT(M, D) \
-	mac_stats->M = dma_stats[MC_CMD_MAC_ ## D]
+	mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
 
 	MAC_STAT(tx_bytes, TX_BYTES);
 	MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
@@ -478,7 +507,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
 	MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
 	mac_stats->rx_good_lt64 = 0;
 
-	efx->n_rx_nodesc_drop_cnt = dma_stats[MC_CMD_MAC_RX_NODESC_DROPS];
+	efx->n_rx_nodesc_drop_cnt =
+		le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
 
 #undef MAC_STAT
 
@@ -507,7 +537,7 @@ static void siena_update_nic_stats(struct efx_nic *efx)
 
 static void siena_start_nic_stats(struct efx_nic *efx)
 {
-	u64 *dma_stats = (u64 *)efx->stats_buffer.addr;
+	__le64 *dma_stats = efx->stats_buffer.addr;
 
 	dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
 
@@ -605,6 +635,8 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.init = siena_init_nic,
 	.fini = efx_port_dummy_op_void,
 	.monitor = NULL,
+	.map_reset_reason = siena_map_reset_reason,
+	.map_reset_flags = siena_map_reset_flags,
 	.reset = siena_reset_hw,
 	.probe_port = siena_probe_port,
 	.remove_port = siena_remove_port,
@@ -641,5 +673,4 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.rx_dc_base = 0x68000,
 	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
-	.reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
 };
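Note (not part of the commit): the siena.c hunks above type the MCDI statistics DMA buffer as little-endian (__le64) and convert every counter with le64_to_cpu(); siena_try_update_nic_stats() reads the generation word before and after the copy so a concurrent firmware update can be detected and retried. A small host-side sketch of that read pattern, with a hypothetical buffer layout and a local helper standing in for le64_to_cpu():

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stats block: firmware bumps GEN_START, rewrites the
	 * counters, then writes GEN_END, all as little-endian 64-bit words. */
	enum { GEN_END, TX_PACKETS, RX_PACKETS, GEN_START, N_STATS };
	#define GENERATION_INVALID ((uint64_t)-1)

	/* Stand-in for le64_to_cpu(): swap only on a big-endian host. */
	static uint64_t le64_to_host(uint64_t v)
	{
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return __builtin_bswap64(v);
	#else
		return v;
	#endif
	}

	/* Copy out a consistent snapshot, in the spirit of
	 * siena_try_update_nic_stats(): succeed only if the generation
	 * words agree around the copy. */
	static int try_read_stats(const volatile uint64_t *dma, uint64_t *out)
	{
		uint64_t gen_end = le64_to_host(dma[GEN_END]);

		if (gen_end == GENERATION_INVALID)
			return -1;	/* nothing DMA'd yet */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* ~rmb() */

		out[TX_PACKETS] = le64_to_host(dma[TX_PACKETS]);
		out[RX_PACKETS] = le64_to_host(dma[RX_PACKETS]);

		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* ~rmb() */
		if (le64_to_host(dma[GEN_START]) != gen_end)
			return -1;	/* updated mid-copy: caller retries */
		return 0;
	}

	int main(void)
	{
		uint64_t buf[N_STATS] = { 7, 100, 200, 7 };	/* consistent snapshot */
		uint64_t snap[N_STATS] = { 0 };

		if (try_read_stats(buf, snap) == 0)
			printf("tx=%llu rx=%llu\n",
			       (unsigned long long)snap[TX_PACKETS],
			       (unsigned long long)snap[RX_PACKETS]);
		return 0;
	}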