New 7.0 FW: bnx2x, cnic, bnx2i, bnx2fc

New FW/HSI (7.0):
 - Added support to 578xx chips
 - Improved HSI - much less direct driver access to the FW internal
   memory is needed.

New implementation of the HSI handling layer in the bnx2x (bnx2x_sp.c):
 - Introduced chip dependent objects that have chip independent interfaces
   for configuration of MACs, multicast addresses, Rx mode, indirection table,
   fast path queues and function initialization/cleanup.
 - Objects functionality is based on the private function pointers, which
   allows not only a per-chip but also PF/VF differentiation while still
   preserving the same interface towards the driver.
 - Objects' interface is not influenced by HSI changes that do not require
   new parameters, keeping the code outside bnx2x_sp.c invariant
   with regard to such HSI changes.

Changes in a CNIC, bnx2fc and bnx2i modules due to the new HSI.

Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@conan.davemloft.net>
This commit is contained in:
Vlad Zolotarov 2011-06-14 14:33:44 +03:00 коммит произвёл David S. Miller
Родитель 042181f5aa
Коммит 619c5cb688
32 изменённых файлов: 20553 добавлений и 9987 удалений

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -18,11 +18,15 @@
#define BNX2X_CMN_H
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "bnx2x.h"
/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
extern int num_queues;
/************************ Macros ********************************/
@ -61,6 +65,73 @@ extern int num_queues;
/*********************** Interfaces ****************************
* Functions that need to be implemented by each driver version
*/
/* Init */
/**
* bnx2x_send_unload_req - request unload mode from the MCP.
*
* @bp: driver handle
* @unload_mode: requested function's unload mode
*
* Return unload mode returned by the MCP: COMMON, PORT or FUNC.
*/
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
/**
* bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
*
* @bp: driver handle
*/
void bnx2x_send_unload_done(struct bnx2x *bp);
/**
* bnx2x_config_rss_pf - configure RSS parameters.
*
* @bp: driver handle
* @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
*/
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
/**
* bnx2x__init_func_obj - init function object
*
* @bp: driver handle
*
* Initializes the Function Object with the appropriate
* parameters which include a function slow path driver
* interface.
*/
void bnx2x__init_func_obj(struct bnx2x *bp);
/**
* bnx2x_setup_queue - setup eth queue.
*
* @bp: driver handle
* @fp: pointer to the fastpath structure
* @leading: boolean
*
*/
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool leading);
/**
* bnx2x_setup_leading - bring up a leading eth queue.
*
* @bp: driver handle
*/
int bnx2x_setup_leading(struct bnx2x *bp);
/**
* bnx2x_fw_command - send the MCP a request
*
* @bp: driver handle
* @command: request
* @param: request's parameter
*
* block until there is a reply
*/
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
/**
* bnx2x_initial_phy_init - initialize link parameters structure variables.
@ -87,6 +158,29 @@ void bnx2x_link_set(struct bnx2x *bp);
*/
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
/**
* bnx2x_drv_pulse - write driver pulse to shmem
*
* @bp: driver handle
*
* writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
* in the shmem.
*/
void bnx2x_drv_pulse(struct bnx2x *bp);
/**
* bnx2x_igu_ack_sb - update IGU with current SB value
*
* @bp: driver handle
* @igu_sb_id: SB id
* @segment: SB segment
* @index: SB index
* @op: SB operation
* @update: is HW update required
*/
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
u16 index, u8 op, u8 update);
/**
* bnx2x__link_status_update - handles link status change.
*
@ -164,21 +258,6 @@ void bnx2x_int_enable(struct bnx2x *bp);
*/
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
/**
* bnx2x_init_firmware - loads device firmware
*
* @bp: driver handle
*/
int bnx2x_init_firmware(struct bnx2x *bp);
/**
* bnx2x_init_hw - init HW blocks according to current initialization stage.
*
* @bp: driver handle
* @load_code: COMMON, PORT or FUNCTION
*/
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
/**
* bnx2x_nic_init - init driver internals.
*
@ -206,16 +285,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp);
*/
void bnx2x_free_mem(struct bnx2x *bp);
/**
* bnx2x_setup_client - setup eth client.
*
* @bp: driver handle
* @fp: pointer to fastpath structure
* @is_leading: boolean
*/
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int is_leading);
/**
* bnx2x_set_num_queues - set number of queues according to mode.
*
@ -259,29 +328,7 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
*
* Configures according to the value in netdev->dev_addr.
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
#ifdef BCM_CNIC
/**
* bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s)
*
* @bp: driver handle
* @set: set or clear the CAM entry
*
 * Uses the next entries in the CAM after the ETH MAC(s).
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if the ramrod doesn't return.
*/
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
/**
* bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC.
*
* @bp: driver handle
* @set: set or clear
*/
int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
#endif
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
/**
* bnx2x_set_rx_mode - set MAC filtering configurations.
@ -289,9 +336,37 @@ int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
* @dev: netdevice
*
* called with netif_tx_lock from dev_mcast.c
* If bp->state is OPEN, should be called with
* netif_addr_lock_bh()
*/
void bnx2x_set_rx_mode(struct net_device *dev);
/**
* bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
*
* @bp: driver handle
*
* If bp->state is OPEN, should be called with
* netif_addr_lock_bh().
*/
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/**
* bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
*
* @bp: driver handle
* @cl_id: client id
* @rx_mode_flags: rx mode configuration
* @rx_accept_flags: rx accept configuration
* @tx_accept_flags: tx accept configuration (tx switch)
* @ramrod_flags: ramrod configuration
*/
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
unsigned long rx_mode_flags,
unsigned long rx_accept_flags,
unsigned long tx_accept_flags,
unsigned long ramrod_flags);
/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
@ -299,14 +374,6 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
/**
* bnx2x_stats_handle - perform statistics handling according to event.
*
* @bp: driver handle
* @event: bnx2x_stats_event
*/
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/**
* bnx2x_sp_event - handle ramrods completion.
*
@ -315,15 +382,6 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
*/
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
/**
* bnx2x_func_start - init function
*
* @bp: driver handle
*
* Must be called before sending CLIENT_SETUP for the first client.
*/
int bnx2x_func_start(struct bnx2x *bp);
/**
* bnx2x_ilt_set_info - prepare ILT configurations.
*
@ -355,6 +413,8 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
* @value: new value
*/
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
/* Error handling */
void bnx2x_panic_dump(struct bnx2x *bp);
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
@ -378,6 +438,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p);
/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x_fastpath *fp);
@ -390,7 +453,6 @@ void bnx2x_free_irq(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
@ -455,19 +517,20 @@ int bnx2x_set_features(struct net_device *dev, u32 features);
*/
void bnx2x_tx_timeout(struct net_device *dev);
/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
/**
 * bnx2x_update_fpsb_idx - latch the current fastpath status block index.
 *
 * @fp:		fastpath structure
 *
 * Copies the Rx state-machine running index from the status block (which
 * the chip updates) into fp->fp_hc_idx for later comparison by the driver.
 */
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
u16 bd_prod, u16 rx_comp_prod,
u16 rx_sge_prod)
static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
struct bnx2x_fastpath *fp, u16 bd_prod,
u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
struct ustorm_eth_rx_producers rx_prods = {0};
int i;
u32 i;
/* Update producers */
rx_prods.bd_prod = bd_prod;
@ -484,10 +547,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
*/
wmb();
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
REG_WR(bp,
BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
((u32 *)&rx_prods)[i]);
for (i = 0; i < sizeof(rx_prods)/4; i++)
REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
mmiowb(); /* keep prod updates ordered */
@ -517,7 +578,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
barrier();
}
static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
u8 idu_sb_id, bool is_Pf)
{
u32 data, ctl, cnt = 100;
@ -525,7 +586,7 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
u32 sb_bit = 1 << (idu_sb_id%32);
u32 func_encode = BP_FUNC(bp) |
u32 func_encode = func |
((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
@ -588,15 +649,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
barrier();
}
static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
u16 index, u8 op, u8 update)
{
u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
igu_addr);
}
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
u16 index, u8 op, u8 update)
{
@ -703,7 +755,7 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
}
/**
* disables tx from stack point of view
* bnx2x_tx_disable - disables tx from stack point of view
*
* @bp: driver handle
*/
@ -738,7 +790,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
int i;
/* Add NAPI objects */
for_each_napi_queue(bp, i)
for_each_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@ -747,7 +799,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
for_each_napi_queue(bp, i)
for_each_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
@ -777,7 +829,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
int idx = RX_SGE_CNT * i - 1;
for (j = 0; j < 2; j++) {
SGE_MASK_CLEAR_BIT(fp, idx);
BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
idx--;
}
}
@ -787,7 +839,7 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
memset(fp->sge_mask, 0xff,
(NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
(NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
/* Clear the two last indices in the page to 1:
these are the indices that correspond to the "next" element,
@ -869,12 +921,61 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
dma_unmap_addr(cons_rx_buf, mapping),
RX_COPY_THRESH, DMA_FROM_DEVICE);
prod_rx_buf->skb = cons_rx_buf->skb;
dma_unmap_addr_set(prod_rx_buf, mapping,
dma_unmap_addr(cons_rx_buf, mapping));
prod_rx_buf->skb = cons_rx_buf->skb;
*prod_bd = *cons_bd;
}
/************************* Init ******************************************/
/**
 * bnx2x_func_start - send the function START ramrod.
 *
 * @bp:		driver handle
 *
 * Prepares the function-state-change parameters (multi-function mode,
 * outer VLAN tag, COS mode) and issues a BNX2X_F_CMD_START transition,
 * blocking until the ramrod completes (RAMROD_COMP_WAIT).
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 *
 * Return: value of bnx2x_func_state_change() — presumably 0 on success,
 * negative on error (TODO confirm against bnx2x_sp.c).
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {0};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	/* Function parameters */
	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;
	start_params->network_cos_mode = OVERRIDE_COS;

	return bnx2x_func_state_change(bp, &func_params);
}
/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:	pointer to upper part
 * @fw_mid:	pointer to middle part
 * @fw_lo:	pointer to lower part
 * @mac:	pointer to MAC address
 *
 * The FW keeps the address as three 16-bit words with the two bytes of
 * each word swapped relative to the canonical mac[] byte order.
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
					 u8 *mac)
{
	u16 *words[3] = { fw_hi, fw_mid, fw_lo };
	int w;

	for (w = 0; w < 3; w++) {
		u8 *b = (u8 *)words[w];

		/* byte-swap each 16-bit word of the address */
		b[0] = mac[2 * w + 1];
		b[1] = mac[2 * w];
	}
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
struct bnx2x_fastpath *fp, int last)
{
@ -893,21 +994,20 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
int i;
for (i = 0; i < last; i++) {
struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
struct sk_buff *skb = rx_buf->skb;
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
struct sw_rx_bd *first_buf = &tpa_info->first_buf;
struct sk_buff *skb = first_buf->skb;
if (skb == NULL) {
DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
continue;
}
if (fp->tpa_state[i] == BNX2X_TPA_START)
if (tpa_info->tpa_state == BNX2X_TPA_START)
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
dma_unmap_addr(first_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_buf->skb = NULL;
first_buf->skb = NULL;
}
}
@ -1036,31 +1136,199 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
return i - fp->eth_q_stats.rx_skb_alloc_failed;
}
/* Statistics IDs are global per chip/path.  On E1x chips Client IDs are
 * allocated per port, so the client id must be offset by the port's
 * id range to produce a path-global statistics id.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	u8 stats_id = fp->cl_id;

	if (CHIP_IS_E1x(bp))
		stats_id += BP_PORT(bp) * FP_SB_MAX_E1x;

	return stats_id;
}
/**
 * bnx2x_init_vlan_mac_fp_objs - init per-queue classification objects.
 *
 * @fp:		fastpath structure
 * @obj_type:	object type (Rx/Tx/both) for the classification rules
 *
 * Sets up the queue's MAC object bound to its client id and cid.
 * Ramrod data and its DMA mapping come from the bp slow-path area;
 * pending-command state is tracked via bp->sp_state and CAM credits
 * are drawn from bp->macs_pool.
 */
static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	/* Configure classification DBs */
	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);
}
/**
* bnx2x_get_path_func_num - get number of active functions
*
* @bp: driver handle
*
* Calculates the number of active (not hidden) functions on the
* current path.
*/
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
u8 func_num = 0, i;
/* 57710 has only one function per-port */
if (CHIP_IS_E1(bp))
return 1;
/* Calculate a number of functions enabled on the current
* PATH/PORT.
*/
if (CHIP_REV_IS_SLOW(bp)) {
if (IS_MF(bp))
func_num = 4;
else
func_num = 2;
} else {
for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
u32 func_config =
MF_CFG_RD(bp,
func_mf_config[BP_PORT(bp) + 2 * i].
config);
func_num +=
((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
}
}
WARN_ON(!func_num);
return func_num;
}
/**
 * bnx2x_init_bp_objs - init the driver-global configuration objects.
 *
 * @bp:		driver handle
 *
 * Sets up the Rx-mode, multicast and RSS configuration objects and the
 * MAC CAM credit pool.  The multicast and RSS objects are bound to the
 * leading queue's client id/cid; all ramrod data buffers come from the
 * bp slow-path area and completions are tracked via bp->sp_state.
 */
static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	/* RX_MODE controlling object */
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	/* multicast configuration controlling object */
	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	/* Setup CAM credit pools */
	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	/* RSS configuration object */
	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);
}
/* Queue-zone id of a fastpath queue: equals the client id on newer chips;
 * on E1x the client id is per-port and must be offset by the port range.
 */
static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;

	if (!CHIP_IS_E1x(bp))
		return fp->cl_id;

	return fp->cl_id + BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H;
}
/* USTORM internal-memory offset of the queue's Rx producers block;
 * addressed by queue-zone id on newer chips, by port+client id on E1x.
 */
static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;

	if (CHIP_IS_E1x(bp))
		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

	return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
}
#ifdef BCM_CNIC
/* Map a CNIC client index to an absolute ETH client id.
 * NOTE(review): the (pf_num >> 1) * NONE_ETH_CONTEXT_USE term looks like a
 * per-path offset for the CNIC client range — confirm against the client id
 * allocation scheme in bnx2x_main.c.
 */
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * NONE_ETH_CONTEXT_USE;
}
/* FW status block id reserved for CNIC. */
static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
	/* the 'first' id is allocated for the cnic */
	return bp->base_fw_ndsb;
}
/* IGU status block id reserved for CNIC (the base IGU SB). */
static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}
static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
unsigned long q_type = 0;
bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
BNX2X_FCOE_ETH_CL_ID_IDX);
/** Current BNX2X_FCOE_ETH_CID definition implies not more than
* 16 ETH clients per function when CNIC is enabled!
*
* Fix it ASAP!!!
*/
bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
bnx2x_fcoe(bp, bp) = bp;
bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
bnx2x_fcoe(bp, index) = FCOE_IDX;
bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
/* qZone id equals to FW (per path) client id */
bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
ETH_MAX_RX_CLIENTS_E1H);
bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
/* init shortcut */
bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
bnx2x_rx_ustorm_prods_offset(fp);
/* Configure Queue State object */
__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, fp->cid, BP_FUNC(bp),
bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata),
q_type);
DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
"igu_sb %d\n",
fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
fp->igu_sb_id);
}
#endif
/**
 * bnx2x_clean_tx_queue - wait for a Tx queue to drain.
 *
 * @bp:		driver handle
 * @fp:		fastpath of the queue to drain
 *
 * Polls until there is no pending Tx work on @fp, sleeping ~1ms per
 * iteration for up to 1000 iterations.  On timeout: with
 * BNX2X_STOP_ON_ERROR it panics and returns -EBUSY; otherwise it logs
 * the producer/consumer mismatch and gives up, still returning 0.
 */
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp)
{
	int cnt = 1000;

	while (bnx2x_has_tx_work_unload(fp)) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: "
				  "fp->tx_pkt_prod(%d) != fp->tx_pkt_cons(%d)\n",
				  fp->index, fp->tx_pkt_prod, fp->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			return -EBUSY;
#else
			break;
#endif
		}
		cnt--;
		usleep_range(1000, 1000);
	}

	return 0;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
static inline void __storm_memset_struct(struct bnx2x *bp,
@ -1071,48 +1339,81 @@ static inline void __storm_memset_struct(struct bnx2x *bp,
REG_WR(bp, addr + (i * 4), data[i]);
}
static inline void storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters,
u16 abs_fid)
static inline void storm_memset_func_cfg(struct bnx2x *bp,
struct tstorm_eth_function_common_config *tcfg,
u16 abs_fid)
{
size_t size = sizeof(struct tstorm_eth_mac_filter_config);
size_t size = sizeof(struct tstorm_eth_function_common_config);
u32 addr = BAR_TSTRORM_INTMEM +
TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);
TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}
static inline void storm_memset_cmng(struct bnx2x *bp,
struct cmng_struct_per_port *cmng,
u8 port)
{
size_t size =
sizeof(struct rate_shaping_vars_per_port) +
sizeof(struct fairness_vars_per_port) +
sizeof(struct safc_struct_per_port) +
sizeof(struct pfc_struct_per_port);
size_t size = sizeof(struct cmng_struct_per_port);
u32 addr = BAR_XSTRORM_INTMEM +
XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
__storm_memset_struct(bp, addr, size, (u32 *)cmng);
addr += size + 4 /* SKIP DCB+LLFC */;
size = sizeof(struct cmng_struct_per_port) -
size /* written */ - 4 /*skipped*/;
__storm_memset_struct(bp, addr, size,
(u32 *)(cmng->traffic_type_to_priority_cos));
}
/* HW Lock for shared dual port PHYs */
/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp:		driver handle
 * @mask:	bits that need to be cleared
 *
 * Polls bp->sp_state under the netdev address lock until every bit in
 * @mask is cleared, sleeping ~1ms per iteration for up to ~5 seconds.
 * A final locked re-check distinguishes a last-moment completion from
 * a real timeout.
 *
 * Return: true if the bits cleared in time, false on timeout.
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
	int tout = 5000; /* Wait for 5 secs tops */

	while (tout--) {
		smp_mb();
		netif_addr_lock_bh(bp->dev);
		if (!(bp->sp_state & mask)) {
			netif_addr_unlock_bh(bp->dev);
			return true;
		}
		netif_addr_unlock_bh(bp->dev);

		usleep_range(1000, 1000);
	}

	smp_mb();

	netif_addr_lock_bh(bp->dev);
	if (bp->sp_state & mask) {
		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
			  "mask 0x%lx\n", bp->sp_state, mask);
		netif_addr_unlock_bh(bp->dev);
		return false;
	}
	netif_addr_unlock_bh(bp->dev);

	return true;
}
/**
* bnx2x_set_ctx_validation - set CDU context validation values
*
* @bp: driver handle
* @cxt: context of the connection on the host memory
* @cid: SW CID of the connection to be configured
*/
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
u32 cid);
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
u8 sb_index, u8 disable, u16 usec);
/**
* bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
*

Просмотреть файл

@ -55,15 +55,14 @@ static void bnx2x_pfc_set(struct bnx2x *bp)
struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
u32 pri_bit, val = 0;
u8 pri;
int i;
/* Tx COS configuration */
if (bp->dcbx_port_params.ets.cos_params[0].pauseable)
pfc_params.rx_cos0_priority_mask =
bp->dcbx_port_params.ets.cos_params[0].pri_bitmask;
if (bp->dcbx_port_params.ets.cos_params[1].pauseable)
pfc_params.rx_cos1_priority_mask =
bp->dcbx_port_params.ets.cos_params[1].pri_bitmask;
for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
if (bp->dcbx_port_params.ets.cos_params[i].pauseable)
pfc_params.rx_cos_priority_mask[i] =
bp->dcbx_port_params.ets.
cos_params[i].pri_bitmask;
/**
* Rx COS configuration
@ -378,7 +377,7 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
{
if (CHIP_IS_E2(bp)) {
if (!CHIP_IS_E1x(bp)) {
if (BP_PORT(bp)) {
BNX2X_ERR("4 port mode is not supported");
return;
@ -406,7 +405,7 @@ static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
0 /* connectionless */,
0 /* dataHi is zero */,
0 /* dataLo is zero */,
1 /* common */);
NONE_CONNECTION_TYPE);
}
static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
@ -417,7 +416,7 @@ static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
0, /* connectionless */
U64_HI(bnx2x_sp_mapping(bp, pfc_config)),
U64_LO(bnx2x_sp_mapping(bp, pfc_config)),
1 /* commmon */);
NONE_CONNECTION_TYPE);
}
static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
@ -425,7 +424,7 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
u8 status = 0;
bnx2x_ets_disabled(&bp->link_params);
bnx2x_ets_disabled(&bp->link_params/*, &bp->link_vars*/);
if (!ets->enabled)
return;
@ -527,6 +526,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
return -EINVAL;
}
rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
DCBX_READ_LOCAL_MIB);
@ -563,15 +563,6 @@ u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
DCB_APP_IDTYPE_ETHTYPE;
}
static inline
void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
{
int i;
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
~DCBX_APP_ENTRY_VALID;
}
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
{
int i, err = 0;
@ -597,32 +588,28 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
}
#endif
/**
 * bnx2x_update_drv_flags - set or clear bits in the shmem2 drv_flags word.
 *
 * @bp:		driver handle
 * @flags:	bits to set or clear
 * @set:	non-zero to set @flags, zero to clear them
 *
 * No-op when the running MFW shmem2 does not expose drv_flags.  The
 * read-modify-write is serialized against other functions via the
 * HW_LOCK_DRV_FLAGS hardware lock.
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
	if (SHMEM2_HAS(bp, drv_flags)) {
		u32 drv_flags;
		bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
		drv_flags = SHMEM2_RD(bp, drv_flags);

		if (set)
			SET_FLAGS(drv_flags, flags);
		else
			RESET_FLAGS(drv_flags, flags);

		SHMEM2_WR(bp, drv_flags, drv_flags);
		DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
		bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
	}
}
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
{
switch (state) {
case BNX2X_DCBX_STATE_NEG_RECEIVED:
#ifdef BCM_CNIC
if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
struct cnic_ops *c_ops;
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
rcu_read_lock();
c_ops = rcu_dereference(bp->cnic_ops);
if (c_ops) {
bnx2x_cnic_notify(bp, CNIC_CTL_STOP_ISCSI_CMD);
rcu_read_unlock();
return;
}
rcu_read_unlock();
}
/* fall through if no CNIC initialized */
case BNX2X_DCBX_STATE_ISCSI_STOPPED:
#endif
{
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
#ifdef BCM_DCBNL
@ -646,41 +633,28 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
bp->dcbx_error);
if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
#ifdef BCM_DCBNL
/**
* Add new app tlvs to dcbnl
*/
bnx2x_dcbnl_update_applist(bp, false);
#endif
bnx2x_dcbx_stop_hw_tx(bp);
return;
}
/* fall through */
/* mark DCBX result for PMF migration */
bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
#ifdef BCM_DCBNL
/**
* Invalidate the local app tlvs if they are not added
* to the dcbnl app list to avoid deleting them from
* the list later on
* Add new app tlvs to dcbnl
*/
bnx2x_dcbx_invalidate_local_apps(bp);
bnx2x_dcbnl_update_applist(bp, false);
#endif
bnx2x_dcbx_stop_hw_tx(bp);
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
bnx2x_pfc_set_pfc(bp);
bnx2x_dcbx_update_ets_params(bp);
if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
bnx2x_dcbx_resume_hw_tx(bp);
return;
}
/* fall through */
bnx2x_dcbx_resume_hw_tx(bp);
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD)
bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
return;
default:
BNX2X_ERR("Unknown DCBX_STATE\n");
@ -868,7 +842,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
{
if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp)) {
if (!CHIP_IS_E1x(bp) && !CHIP_MODE_IS_4_PORT(bp)) {
bp->dcb_state = dcb_on;
bp->dcbx_enabled = dcbx_enabled;
} else {
@ -966,7 +940,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
bp->dcb_state, bp->port.pmf);
if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
dcbx_lldp_params_offset =
SHMEM2_RD(bp, dcbx_lldp_params_offset);
@ -974,6 +948,8 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
dcbx_lldp_params_offset);
bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
bnx2x_dcbx_lldp_updated_params(bp,
dcbx_lldp_params_offset);
@ -981,46 +957,12 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
bnx2x_dcbx_admin_mib_updated_params(bp,
dcbx_lldp_params_offset);
/* set default configuration BC has */
bnx2x_dcbx_set_params(bp,
BNX2X_DCBX_STATE_NEG_RECEIVED);
/* Let HW start negotiation */
bnx2x_fw_command(bp,
DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
}
}
}
void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp)
{
struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES];
u32 i = 0, addr;
memset(pricos, 0, sizeof(pricos));
/* Default initialization */
for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++)
pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
/* Store per port struct to internal memory */
addr = BAR_XSTRORM_INTMEM +
XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
offsetof(struct cmng_struct_per_port,
traffic_type_to_priority_cos);
__storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos);
/* LLFC disabled.*/
REG_WR8(bp , BAR_XSTRORM_INTMEM +
XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
offsetof(struct cmng_struct_per_port, llfc_mode),
LLFC_MODE_NONE);
/* DCBX disabled.*/
REG_WR8(bp , BAR_XSTRORM_INTMEM +
XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
offsetof(struct cmng_struct_per_port, dcb_enabled),
DCB_DISABLED);
}
static void
bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
struct flow_control_configuration *pfc_fw_cfg)
@ -1591,13 +1533,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
/* Fw version should be incremented each update */
pfc_fw_cfg->dcb_version = ++bp->dcb_version;
pfc_fw_cfg->dcb_enabled = DCB_ENABLED;
/* Default initialization */
for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) {
tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
tt2cos[pri].cos = 0;
}
pfc_fw_cfg->dcb_enabled = 1;
/* Fill priority parameters */
for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
@ -1605,14 +1541,37 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
pri_bit = 1 << tt2cos[pri].priority;
/* Fill COS parameters based on COS calculated to
* make it more generally for future use */
* make it more general for future use */
for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
if (bp->dcbx_port_params.ets.cos_params[cos].
pri_bitmask & pri_bit)
tt2cos[pri].cos = cos;
}
/* we never want the FW to add a 0 vlan tag */
pfc_fw_cfg->dont_add_pri_0_en = 1;
bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
}
/**
 * bnx2x_dcbx_pmf_update - sync DCBX results after a PMF migration.
 *
 * @bp:		driver handle
 *
 * If the previous PMF already completed a DCBX negotiation (marked via
 * DRV_FLAGS_DCB_CONFIGURED in shmem2 drv_flags), re-read the negotiation
 * results from shmem and update the local DCBX parameters accordingly.
 */
void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
{
	/* if we need to synchronize the DCBX result from the previous PMF,
	 * read it from shmem and update bp accordingly
	 */
	if (SHMEM2_HAS(bp, drv_flags) &&
	    GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
		/* Read neg results if dcbx is in the FW */
		if (bnx2x_dcbx_read_shmem_neg_results(bp))
			return;

		bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
					  bp->dcbx_error);
		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
					 bp->dcbx_error);
	}
}
/* DCB netlink */
#ifdef BCM_DCBNL

Просмотреть файл

@ -179,9 +179,6 @@ void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
enum {
BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
#ifdef BCM_CNIC
BNX2X_DCBX_STATE_ISCSI_STOPPED,
#endif
BNX2X_DCBX_STATE_TX_PAUSED,
BNX2X_DCBX_STATE_TX_RELEASED
};

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -38,8 +38,6 @@ static const struct {
char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
{ Q_STATS_OFFSET32(error_bytes_received_hi),
8, "[%s]: rx_error_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
8, "[%s]: rx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
@ -53,13 +51,18 @@ static const struct {
4, "[%s]: rx_skb_alloc_discard" },
{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8, "[%s]: tx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8, "[%s]: tx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8, "[%s]: tx_bcast_packets" }
8, "[%s]: tx_bcast_packets" },
{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
8, "[%s]: tpa_aggregations" },
{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
8, "[%s]: tpa_aggregated_frames"},
{ Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"}
};
#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@ -99,8 +102,8 @@ static const struct {
8, STATS_FLAGS_BOTH, "rx_discards" },
{ STATS_OFFSET32(mac_filter_discard),
4, STATS_FLAGS_PORT, "rx_filtered_packets" },
{ STATS_OFFSET32(xxoverflow_discard),
4, STATS_FLAGS_PORT, "rx_fw_discards" },
{ STATS_OFFSET32(mf_tag_discard),
4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
{ STATS_OFFSET32(brb_drop_hi),
8, STATS_FLAGS_PORT, "rx_brb_discard" },
{ STATS_OFFSET32(brb_truncate_hi),
@ -159,7 +162,13 @@ static const struct {
{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
{ STATS_OFFSET32(pause_frames_sent_hi),
8, STATS_FLAGS_PORT, "tx_pause_frames" }
8, STATS_FLAGS_PORT, "tx_pause_frames" },
{ STATS_OFFSET32(total_tpa_aggregations_hi),
8, STATS_FLAGS_FUNC, "tpa_aggregations" },
{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
{ STATS_OFFSET32(total_tpa_bytes_hi),
8, STATS_FLAGS_FUNC, "tpa_bytes"}
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@ -517,7 +526,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
regdump_len += wreg_addrs_e1h[i].size *
(1 + wreg_addrs_e1h[i].read_regs_count);
} else if (CHIP_IS_E2(bp)) {
} else if (!CHIP_IS_E1x(bp)) {
for (i = 0; i < REGS_COUNT; i++)
if (IS_E2_ONLINE(reg_addrs[i].info))
regdump_len += reg_addrs[i].size;
@ -589,7 +598,7 @@ static void bnx2x_get_regs(struct net_device *dev,
dump_hdr.info = RI_E1_ONLINE;
else if (CHIP_IS_E1H(bp))
dump_hdr.info = RI_E1H_ONLINE;
else if (CHIP_IS_E2(bp))
else if (!CHIP_IS_E1x(bp))
dump_hdr.info = RI_E2_ONLINE |
(BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
@ -610,14 +619,18 @@ static void bnx2x_get_regs(struct net_device *dev,
*p++ = REG_RD(bp,
reg_addrs[i].addr + j*4);
} else if (CHIP_IS_E2(bp)) {
} else if (!CHIP_IS_E1x(bp)) {
for (i = 0; i < REGS_COUNT; i++)
if (IS_E2_ONLINE(reg_addrs[i].info))
for (j = 0; j < reg_addrs[i].size; j++)
*p++ = REG_RD(bp,
reg_addrs[i].addr + j*4);
bnx2x_read_pages_regs_e2(bp, p);
if (CHIP_IS_E2(bp))
bnx2x_read_pages_regs_e2(bp, p);
else
/* E3 paged registers read is unimplemented yet */
WARN_ON(1);
}
/* Re-enable parity attentions */
bnx2x_clear_blocks_parity(bp);
@ -625,8 +638,6 @@ static void bnx2x_get_regs(struct net_device *dev,
bnx2x_enable_blocks_parity(bp);
}
#define PHY_FW_VER_LEN 20
static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@ -1334,60 +1345,129 @@ static const struct {
{ "idle check (online)" }
};
enum {
BNX2X_CHIP_E1_OFST = 0,
BNX2X_CHIP_E1H_OFST,
BNX2X_CHIP_E2_OFST,
BNX2X_CHIP_E3_OFST,
BNX2X_CHIP_E3B0_OFST,
BNX2X_CHIP_MAX_OFST
};
#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST)
#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST)
#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST)
#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST)
#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST)
#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1)
#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
static int bnx2x_test_registers(struct bnx2x *bp)
{
int idx, i, rc = -ENODEV;
u32 wr_val = 0;
u32 wr_val = 0, hw;
int port = BP_PORT(bp);
static const struct {
u32 hw;
u32 offset0;
u32 offset1;
u32 mask;
} reg_tbl[] = {
/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
{ HC_REG_AGG_INT_0, 4, 0x000003ff },
{ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
{ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
{ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
{ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
{ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
{ QM_REG_CONNNUM_0, 4, 0x000fffff },
{ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
{ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
{ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
{ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
{ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
{ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
{ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
{ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
{ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
{ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
{ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
{ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
{ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
{ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
{ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
{ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
{ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
{ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
{ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
/* 0 */ { BNX2X_CHIP_MASK_ALL,
BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
{ BNX2X_CHIP_MASK_ALL,
DORQ_REG_DB_ADDR0, 4, 0xffffffff },
{ BNX2X_CHIP_MASK_E1X,
HC_REG_AGG_INT_0, 4, 0x000003ff },
{ BNX2X_CHIP_MASK_ALL,
PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
{ BNX2X_CHIP_MASK_E3B0,
PBF_REG_INIT_CRD_Q0, 4, 0x000007ff },
{ BNX2X_CHIP_MASK_ALL,
PRS_REG_CID_PORT_0, 4, 0x00ffffff },
{ BNX2X_CHIP_MASK_ALL,
PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
{ BNX2X_CHIP_MASK_ALL,
PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
{ BNX2X_CHIP_MASK_ALL,
PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
/* 10 */ { BNX2X_CHIP_MASK_ALL,
PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
{ BNX2X_CHIP_MASK_ALL,
PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
{ BNX2X_CHIP_MASK_ALL,
QM_REG_CONNNUM_0, 4, 0x000fffff },
{ BNX2X_CHIP_MASK_ALL,
TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
{ BNX2X_CHIP_MASK_ALL,
SRC_REG_KEYRSS0_0, 40, 0xffffffff },
{ BNX2X_CHIP_MASK_ALL,
SRC_REG_KEYRSS0_7, 40, 0xffffffff },
{ BNX2X_CHIP_MASK_ALL,
XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
{ BNX2X_CHIP_MASK_ALL,
XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
{ BNX2X_CHIP_MASK_ALL,
XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
/* 30 */ { BNX2X_CHIP_MASK_ALL,
NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
{ BNX2X_CHIP_MASK_ALL,
NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
{ 0xffffffff, 0, 0x00000000 }
{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
};
if (!netif_running(bp->dev))
return rc;
if (CHIP_IS_E1(bp))
hw = BNX2X_CHIP_MASK_E1;
else if (CHIP_IS_E1H(bp))
hw = BNX2X_CHIP_MASK_E1H;
else if (CHIP_IS_E2(bp))
hw = BNX2X_CHIP_MASK_E2;
else if (CHIP_IS_E3B0(bp))
hw = BNX2X_CHIP_MASK_E3B0;
else /* e3 A0 */
hw = BNX2X_CHIP_MASK_E3;
/* Repeat the test twice:
First by writing 0x00000000, second by writing 0xffffffff */
for (idx = 0; idx < 2; idx++) {
@ -1403,8 +1483,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
u32 offset, mask, save_val, val;
if (CHIP_IS_E2(bp) &&
reg_tbl[i].offset0 == HC_REG_AGG_INT_0)
if (!(hw & reg_tbl[i].hw))
continue;
offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
@ -1421,7 +1500,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
/* verify value is as expected */
if ((val & mask) != (wr_val & mask)) {
DP(NETIF_MSG_PROBE,
DP(NETIF_MSG_HW,
"offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
offset, val, wr_val, mask);
goto test_reg_exit;
@ -1438,7 +1517,7 @@ test_reg_exit:
static int bnx2x_test_memory(struct bnx2x *bp)
{
int i, j, rc = -ENODEV;
u32 val;
u32 val, index;
static const struct {
u32 offset;
int size;
@ -1453,32 +1532,44 @@ static int bnx2x_test_memory(struct bnx2x *bp)
{ 0xffffffff, 0 }
};
static const struct {
char *name;
u32 offset;
u32 e1_mask;
u32 e1h_mask;
u32 e2_mask;
u32 hw_mask[BNX2X_CHIP_MAX_OFST];
} prty_tbl[] = {
{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0, 0 },
{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2, 0 },
{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0, 0 },
{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0, 0 },
{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0, 0 },
{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0, 0 },
{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS,
{0x3ffc0, 0, 0, 0} },
{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS,
{0x2, 0x2, 0, 0} },
{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
{0, 0, 0, 0} },
{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS,
{0x3ffc0, 0, 0, 0} },
{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS,
{0x3ffc0, 0, 0, 0} },
{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS,
{0x3ffc1, 0, 0, 0} },
{ NULL, 0xffffffff, 0, 0, 0 }
{ NULL, 0xffffffff, {0, 0, 0, 0} }
};
if (!netif_running(bp->dev))
return rc;
if (CHIP_IS_E1(bp))
index = BNX2X_CHIP_E1_OFST;
else if (CHIP_IS_E1H(bp))
index = BNX2X_CHIP_E1H_OFST;
else if (CHIP_IS_E2(bp))
index = BNX2X_CHIP_E2_OFST;
else /* e3 */
index = BNX2X_CHIP_E3_OFST;
/* pre-Check the parity status */
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
(CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
(CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
if (val & ~(prty_tbl[i].hw_mask[index])) {
DP(NETIF_MSG_HW,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
@ -1493,9 +1584,7 @@ static int bnx2x_test_memory(struct bnx2x *bp)
/* Check the parity status */
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
(CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
(CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
if (val & ~(prty_tbl[i].hw_mask[index])) {
DP(NETIF_MSG_HW,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
@ -1512,12 +1601,16 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
{
int cnt = 1400;
if (link_up)
if (link_up) {
while (bnx2x_link_test(bp, is_serdes) && cnt--)
msleep(10);
msleep(20);
if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
}
}
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
{
unsigned int pkt_size, num_pkts, i;
struct sk_buff *skb;
@ -1526,14 +1619,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
struct bnx2x_fastpath *fp_tx = &bp->fp[0];
u16 tx_start_idx, tx_idx;
u16 rx_start_idx, rx_idx;
u16 pkt_prod, bd_prod;
u16 pkt_prod, bd_prod, rx_comp_cons;
struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
dma_addr_t mapping;
union eth_rx_cqe *cqe;
u8 cqe_fp_flags;
u8 cqe_fp_flags, cqe_fp_type;
struct sw_rx_bd *rx_buf;
u16 len;
int rc = -ENODEV;
@ -1545,7 +1638,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
return -EINVAL;
break;
case BNX2X_MAC_LOOPBACK:
bp->link_params.loopback_mode = LOOPBACK_BMAC;
bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
LOOPBACK_XMAC : LOOPBACK_BMAC;
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
default:
@ -1566,6 +1660,14 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
for (i = ETH_HLEN; i < pkt_size; i++)
packet[i] = (unsigned char) (i & 0xff);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
rc = -ENOMEM;
dev_kfree_skb(skb);
BNX2X_ERR("Unable to map SKB\n");
goto test_loopback_exit;
}
/* send the loopback packet */
num_pkts = 0;
@ -1580,8 +1682,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
bd_prod = TX_BD(fp_tx->tx_bd_prod);
tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
mapping = dma_map_single(&bp->pdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@ -1611,6 +1711,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
mmiowb();
barrier();
num_pkts++;
fp_tx->tx_bd_prod += 2; /* start + pbd */
@ -1639,9 +1740,11 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
if (rx_idx != rx_start_idx + num_pkts)
goto test_loopback_exit;
cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
goto test_loopback_rx_exit;
len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
@ -1649,6 +1752,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
goto test_loopback_rx_exit;
rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
dma_sync_single_for_device(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp_rx->rx_buf_size, DMA_FROM_DEVICE);
skb = rx_buf->skb;
skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
for (i = ETH_HLEN; i < pkt_size; i++)
@ -1674,7 +1780,7 @@ test_loopback_exit:
return rc;
}
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
static int bnx2x_test_loopback(struct bnx2x *bp)
{
int rc = 0, res;
@ -1687,13 +1793,13 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
bnx2x_netif_stop(bp, 1);
bnx2x_acquire_phy_lock(bp);
res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
if (res) {
DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
rc |= BNX2X_PHY_LOOPBACK_FAILED;
}
res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
if (res) {
DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
rc |= BNX2X_MAC_LOOPBACK_FAILED;
@ -1765,39 +1871,20 @@ test_nvram_exit:
return rc;
}
/* Send an EMPTY ramrod on the first queue */
static int bnx2x_test_intr(struct bnx2x *bp)
{
struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
int i, rc;
struct bnx2x_queue_state_params params = {0};
if (!netif_running(bp->dev))
return -ENODEV;
config->hdr.length = 0;
if (CHIP_IS_E1(bp))
config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
else
config->hdr.offset = BP_FUNC(bp);
config->hdr.client_id = bp->fp->cl_id;
config->hdr.reserved1 = 0;
params.q_obj = &bp->fp->q_obj;
params.cmd = BNX2X_Q_CMD_EMPTY;
bp->set_mac_pending = 1;
smp_wmb();
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
if (rc == 0) {
for (i = 0; i < 10; i++) {
if (!bp->set_mac_pending)
break;
smp_rmb();
msleep_interruptible(10);
}
if (i == 10)
rc = -ENODEV;
}
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
return rc;
return bnx2x_queue_state_change(bp, &params);
}
static void bnx2x_self_test(struct net_device *dev,
@ -1836,7 +1923,7 @@ static void bnx2x_self_test(struct net_device *dev,
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
bnx2x_nic_load(bp, LOAD_DIAG);
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
bnx2x_wait_for_link(bp, 1, is_serdes);
if (bnx2x_test_registers(bp) != 0) {
buf[0] = 1;
@ -1847,7 +1934,7 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
buf[2] = bnx2x_test_loopback(bp, link_up);
buf[2] = bnx2x_test_loopback(bp);
if (buf[2] != 0)
etest->flags |= ETH_TEST_FL_FAILED;
@ -1885,6 +1972,14 @@ static void bnx2x_self_test(struct net_device *dev,
#define IS_MF_MODE_STAT(bp) \
(IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
/* ethtool statistics are displayed for all regular ethernet queues and the
* fcoe L2 queue if not disabled
*/
static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
{
return BNX2X_NUM_ETH_QUEUES(bp);
}
static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
struct bnx2x *bp = netdev_priv(dev);
@ -1893,7 +1988,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
num_stats = BNX2X_NUM_STAT_QUEUES(bp) *
num_stats = bnx2x_num_stat_queues(bp) *
BNX2X_NUM_Q_STATS;
if (!IS_MF_MODE_STAT(bp))
num_stats += BNX2X_NUM_STATS;
@ -1926,14 +2021,9 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
case ETH_SS_STATS:
if (is_multi(bp)) {
k = 0;
for_each_napi_queue(bp, i) {
for_each_eth_queue(bp, i) {
memset(queue_name, 0, sizeof(queue_name));
if (IS_FCOE_IDX(i))
sprintf(queue_name, "fcoe");
else
sprintf(queue_name, "%d", i);
sprintf(queue_name, "%d", i);
for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
snprintf(buf + (k + j)*ETH_GSTRING_LEN,
ETH_GSTRING_LEN,
@ -1972,7 +2062,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
if (is_multi(bp)) {
k = 0;
for_each_napi_queue(bp, i) {
for_each_eth_queue(bp, i) {
hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
if (bnx2x_q_stats_arr[j].size == 0) {
@ -2090,14 +2180,30 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
size_t copy_size =
min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
return -EOPNOTSUPP;
indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
memcpy(indir->ring_index, bp->rx_indir_table,
copy_size * sizeof(bp->rx_indir_table[0]));
/* Get the current configuration of the RSS indirection table */
bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
/*
* We can't use a memcpy() as an internal storage of an
* indirection table is a u8 array while indir->ring_index
* points to an array of u32.
*
* Indirection table contains the FW Client IDs, so we need to
* align the returned table to the Client ID of the leading RSS
* queue.
*/
for (i = 0; i < copy_size; i++)
indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
return 0;
}
@ -2106,21 +2212,33 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
return -EOPNOTSUPP;
/* Validate size and indices */
if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
/* validate the size */
if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
return -EINVAL;
for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
return -EINVAL;
memcpy(bp->rx_indir_table, indir->ring_index,
indir->size * sizeof(bp->rx_indir_table[0]));
bnx2x_push_indir_table(bp);
return 0;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/* validate the indices */
if (indir->ring_index[i] >= num_eth_queues)
return -EINVAL;
/*
* The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
* as an internal storage of an indirection table is a u8 array
* while indir->ring_index points to an array of u32.
*
* Indirection table contains the FW Client IDs, so we need to
* align the received table to the Client ID of the leading RSS
* queue
*/
ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
}
return bnx2x_config_rss_pf(bp, ind_table, false);
}
static const struct ethtool_ops bnx2x_ethtool_ops = {

Просмотреть файл

@ -10,249 +10,221 @@
#ifndef BNX2X_FW_DEFS_H
#define BNX2X_FW_DEFS_H
#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base)
#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base)
#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[141].base + ((assertListEntry) * IRO[141].m1))
#define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
(IRO[144].base + ((pfId) * IRO[144].m1))
(IRO[147].base + ((assertListEntry) * IRO[147].m1))
#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
(IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \
IRO[149].m2))
(IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
IRO[153].m2))
#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
(IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \
IRO[150].m2))
(IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
IRO[154].m2))
#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
(IRO[156].base + ((funcId) * IRO[156].m1))
(IRO[159].base + ((funcId) * IRO[159].m1))
#define CSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[146].base + ((funcId) * IRO[146].m1))
#define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base)
#define CSTORM_IGU_MODE_OFFSET (IRO[154].base)
(IRO[149].base + ((funcId) * IRO[149].m1))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[311].base + ((pfId) * IRO[311].m1))
(IRO[315].base + ((pfId) * IRO[315].m1))
#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[312].base + ((pfId) * IRO[312].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
(IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \
IRO[304].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \
IRO[306].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \
IRO[305].m2))
#define \
CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \
IRO[307].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
(IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \
IRO[303].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \
IRO[309].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \
IRO[308].m2))
(IRO[316].base + ((pfId) * IRO[316].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[310].base + ((pfId) * IRO[310].m1))
(IRO[314].base + ((pfId) * IRO[314].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
(IRO[306].base + ((pfId) * IRO[306].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
(IRO[305].base + ((pfId) * IRO[305].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
#define CSTORM_PATH_ID_OFFSET (IRO[159].base)
(IRO[304].base + ((pfId) * IRO[304].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[151].base + ((funcId) * IRO[151].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
(IRO[137].base + ((pfId) * IRO[137].m1))
#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
(IRO[136].base + ((pfId) * IRO[136].m1))
#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size)
#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
(IRO[138].base + ((pfId) * IRO[138].m1))
#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size)
#define CSTORM_STATS_FLAGS_OFFSET(pfId) \
(IRO[142].base + ((pfId) * IRO[142].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
(IRO[143].base + ((pfId) * IRO[143].m1))
#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
(IRO[141].base + ((pfId) * IRO[141].m1))
#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
(IRO[144].base + ((pfId) * IRO[144].m1))
#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
(IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
(IRO[129].base + ((sbId) * IRO[129].m1))
(IRO[133].base + ((sbId) * IRO[133].m1))
#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
(IRO[134].base + ((sbId) * IRO[134].m1))
#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
(IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
(IRO[128].base + ((sbId) * IRO[128].m1))
#define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size)
#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
(IRO[132].base + ((sbId) * IRO[132].m1))
#define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size)
#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
(IRO[137].base + ((sbId) * IRO[137].m1))
#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
(IRO[151].base + ((vfId) * IRO[151].m1))
(IRO[155].base + ((vfId) * IRO[155].m1))
#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
(IRO[152].base + ((vfId) * IRO[152].m1))
(IRO[156].base + ((vfId) * IRO[156].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[147].base + ((funcId) * IRO[147].m1))
#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base)
(IRO[150].base + ((funcId) * IRO[150].m1))
#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IRO[198].base + ((pfId) * IRO[198].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base)
(IRO[203].base + ((pfId) * IRO[203].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[98].base + ((assertListEntry) * IRO[98].m1))
#define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \
(IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \
IRO[197].m2))
#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base)
(IRO[101].base + ((assertListEntry) * IRO[101].m1))
#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
(IRO[105].base)
#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
(IRO[96].base + ((pfId) * IRO[96].m1))
#define TSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[101].base + ((funcId) * IRO[101].m1))
(IRO[108].base)
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
(IRO[195].base + ((pfId) * IRO[195].m1))
#define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base)
#define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \
(IRO[91].base + ((pfId) * IRO[91].m1))
#define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size)
#define \
TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \
(IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \
* IRO[260].m2))
(IRO[201].base + ((pfId) * IRO[201].m1))
#define TSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[103].base + ((funcId) * IRO[103].m1))
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1))
(IRO[271].base + ((pfId) * IRO[271].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IRO[265].base + ((pfId) * IRO[265].m1))
(IRO[272].base + ((pfId) * IRO[272].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
(IRO[266].base + ((pfId) * IRO[266].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
(IRO[267].base + ((pfId) * IRO[267].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[263].base + ((pfId) * IRO[263].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[262].base + ((pfId) * IRO[262].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[261].base + ((pfId) * IRO[261].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[259].base + ((pfId) * IRO[259].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
(IRO[269].base + ((pfId) * IRO[269].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[256].base + ((pfId) * IRO[256].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[257].base + ((pfId) * IRO[257].m1))
#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[258].base + ((pfId) * IRO[258].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
(IRO[196].base + ((pfId) * IRO[196].m1))
#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \
(IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \
IRO[100].m2))
#define TSTORM_STATS_FLAGS_OFFSET(pfId) \
(IRO[95].base + ((pfId) * IRO[95].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
(IRO[211].base + ((pfId) * IRO[211].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[102].base + ((funcId) * IRO[102].m1))
#define USTORM_AGG_DATA_OFFSET (IRO[201].base)
#define USTORM_AGG_DATA_SIZE (IRO[201].size)
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[169].base + ((assertListEntry) * IRO[169].m1))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[178].base + ((portId) * IRO[178].m1))
#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
(IRO[172].base + ((pfId) * IRO[172].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
(IRO[313].base + ((pfId) * IRO[313].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[174].base + ((funcId) * IRO[174].m1))
#define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base)
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[277].base + ((pfId) * IRO[277].m1))
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[278].base + ((pfId) * IRO[278].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
(IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[275].base + ((pfId) * IRO[275].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[274].base + ((pfId) * IRO[274].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[273].base + ((pfId) * IRO[273].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
(IRO[274].base + ((pfId) * IRO[274].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[270].base + ((pfId) * IRO[270].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[269].base + ((pfId) * IRO[269].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[267].base + ((pfId) * IRO[267].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
(IRO[276].base + ((pfId) * IRO[276].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[263].base + ((pfId) * IRO[263].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[265].base + ((pfId) * IRO[265].m1))
#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IRO[266].base + ((pfId) * IRO[266].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
(IRO[202].base + ((pfId) * IRO[202].m1))
#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[105].base + ((funcId) * IRO[105].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
(IRO[216].base + ((pfId) * IRO[216].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[104].base + ((funcId) * IRO[104].m1))
#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
#define USTORM_AGG_DATA_SIZE (IRO[206].size)
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[176].base + ((assertListEntry) * IRO[176].m1))
#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
(IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
IRO[205].m2))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
(IRO[317].base + ((pfId) * IRO[317].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
(IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[176].base + ((pfId) * IRO[176].m1))
#define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \
(IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \
IRO[173].m2))
#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
(IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \
IRO[204].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
(IRO[205].base + ((qzoneId) * IRO[205].m1))
#define USTORM_STATS_FLAGS_OFFSET(pfId) \
(IRO[171].base + ((pfId) * IRO[171].m1))
#define USTORM_TPA_BTR_OFFSET (IRO[202].base)
#define USTORM_TPA_BTR_SIZE (IRO[202].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[175].base + ((funcId) * IRO[175].m1))
#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base)
#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base)
#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base)
#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[53].base + ((assertListEntry) * IRO[53].m1))
#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
(IRO[47].base + ((portId) * IRO[47].m1))
#define XSTORM_E1HOV_OFFSET(pfId) \
(IRO[55].base + ((pfId) * IRO[55].m1))
#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
(IRO[45].base + ((pfId) * IRO[45].m1))
#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
(IRO[49].base + ((pfId) * IRO[49].m1))
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[51].base + ((funcId) * IRO[51].m1))
#define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base)
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[288].base + ((pfId) * IRO[288].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[287].base + ((pfId) * IRO[287].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
(IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[285].base + ((pfId) * IRO[285].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
(IRO[284].base + ((pfId) * IRO[284].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
(IRO[283].base + ((pfId) * IRO[283].m1))
#define XSTORM_PATH_ID_OFFSET (IRO[65].base)
#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \
(IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \
IRO[50].m2))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[278].base + ((pfId) * IRO[278].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[277].base + ((pfId) * IRO[277].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
(IRO[284].base + ((pfId) * IRO[284].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[182].base + ((pfId) * IRO[182].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[180].base + ((funcId) * IRO[180].m1))
#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
(IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
IRO[209].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
(IRO[210].base + ((qzoneId) * IRO[210].m1))
#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
#define USTORM_TPA_BTR_SIZE (IRO[207].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[179].base + ((funcId) * IRO[179].m1))
#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[50].base + ((assertListEntry) * IRO[50].m1))
#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
(IRO[43].base + ((portId) * IRO[43].m1))
#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
(IRO[45].base + ((pfId) * IRO[45].m1))
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
(IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
(IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
(IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
(IRO[288].base + ((pfId) * IRO[288].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
(IRO[287].base + ((pfId) * IRO[287].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[48].base + ((pfId) * IRO[48].m1))
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[49].base + ((funcId) * IRO[49].m1))
#define XSTORM_SPQ_DATA_OFFSET(funcId) \
(IRO[32].base + ((funcId) * IRO[32].m1))
#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
@ -260,42 +232,37 @@
(IRO[30].base + ((funcId) * IRO[30].m1))
#define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IRO[31].base + ((funcId) * IRO[31].m1))
#define XSTORM_STATS_FLAGS_OFFSET(pfId) \
(IRO[43].base + ((pfId) * IRO[43].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
(IRO[206].base + ((portId) * IRO[206].m1))
(IRO[211].base + ((portId) * IRO[211].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
(IRO[207].base + ((portId) * IRO[207].m1))
(IRO[212].base + ((portId) * IRO[212].m1))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
(IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \
IRO[209].m2))
(IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
IRO[214].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[52].base + ((funcId) * IRO[52].m1))
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
/* RSS hash types */
#define DEFAULT_HASH_TYPE 0
#define IPV4_HASH_TYPE 1
#define TCP_IPV4_HASH_TYPE 2
#define IPV6_HASH_TYPE 3
#define TCP_IPV6_HASH_TYPE 4
#define VLAN_PRI_HASH_TYPE 5
#define E1HOV_PRI_HASH_TYPE 6
#define DSCP_HASH_TYPE 7
/**
* This file defines HSI constants for the ETH flow
*/
#ifdef _EVEREST_MICROCODE
#include "Microcode\Generated\DataTypes\eth_rx_bd.h"
#include "Microcode\Generated\DataTypes\eth_tx_bd.h"
#include "Microcode\Generated\DataTypes\eth_rx_cqe.h"
#include "Microcode\Generated\DataTypes\eth_rx_sge.h"
#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h"
#endif
/* Ethernet Ring parameters */
#define X_ETH_LOCAL_RING_SIZE 13
#define FIRST_BD_IN_PKT 0
#define FIRST_BD_IN_PKT 0
#define PARSE_BD_INDEX 1
#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
#define U_ETH_NUM_OF_SGES_TO_FETCH 8
#define U_ETH_MAX_SGES_FOR_PACKET 3
/*Tx params*/
#define X_ETH_NO_VLAN 0
#define X_ETH_OUTBAND_VLAN 1
#define X_ETH_INBAND_VLAN 2
/* Rx ring params */
#define U_ETH_LOCAL_BD_RING_SIZE 8
#define U_ETH_LOCAL_SGE_RING_SIZE 10
@ -311,79 +278,64 @@
#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
#define U_ETH_UNDEFINED_Q 0xFF
/* values of command IDs in the ramrod message */
#define RAMROD_CMD_ID_ETH_UNUSED 0
#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1
#define RAMROD_CMD_ID_ETH_UPDATE 2
#define RAMROD_CMD_ID_ETH_HALT 3
#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4
#define RAMROD_CMD_ID_ETH_ACTIVATE 5
#define RAMROD_CMD_ID_ETH_DEACTIVATE 6
#define RAMROD_CMD_ID_ETH_EMPTY 7
#define RAMROD_CMD_ID_ETH_TERMINATE 8
/* command values for set mac command */
#define T_ETH_MAC_COMMAND_SET 0
#define T_ETH_MAC_COMMAND_INVALIDATE 1
#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY 10
#define ETH_NUM_OF_RSS_ENGINES_E2 72
#define FILTER_RULES_COUNT 16
#define MULTICAST_RULES_COUNT 16
#define CLASSIFY_RULES_COUNT 16
/*The CRC32 seed, that is used for the hash(reduction) multicast address */
#define T_ETH_CRC32_HASH_SEED 0x00000000
#define ETH_CRC32_HASH_SEED 0x00000000
#define ETH_CRC32_HASH_BIT_SIZE (8)
#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
/* Maximal L2 clients supported */
#define ETH_MAX_RX_CLIENTS_E1 18
#define ETH_MAX_RX_CLIENTS_E1H 28
#define ETH_MAX_RX_CLIENTS_E2 152
/* Maximal statistics client Ids */
#define MAX_STAT_COUNTER_ID_E1 36
#define MAX_STAT_COUNTER_ID_E1H 56
#define MAX_STAT_COUNTER_ID_E2 140
#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
#define MAX_MAC_CREDIT_E2 272 /* Per Path */
#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
#define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H
/* Maximal aggregation queues supported */
#define ETH_MAX_AGGREGATION_QUEUES_E1 32
#define ETH_MAX_AGGREGATION_QUEUES_E1H 64
/* ETH RSS modes */
#define ETH_RSS_MODE_DISABLED 0
#define ETH_RSS_MODE_REGULAR 1
#define ETH_RSS_MODE_VLAN_PRI 2
#define ETH_RSS_MODE_E1HOV_PRI 3
#define ETH_RSS_MODE_IP_DSCP 4
#define ETH_RSS_MODE_E2_INTEG 5
#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
/* ETH vlan filtering modes */
#define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */
#define ETH_VLAN_FILTER_SPECIFIC_VLAN \
1 /* Only the vlan_id is allowed */
#define ETH_VLAN_FILTER_CLASSIFY \
2 /* vlan will be added to CAM for classification */
#define ETH_NUM_OF_MCAST_BINS 256
#define ETH_NUM_OF_MCAST_ENGINES_E2 72
/* Fast path CQE selection */
#define ETH_FP_CQE_REGULAR 0
#define ETH_FP_CQE_SGL 1
#define ETH_FP_CQE_RAW 2
#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
(ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
(ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
/**
* This file defines HSI constants common to all microcode flows
*/
/* Connection types */
#define ETH_CONNECTION_TYPE 0
#define TOE_CONNECTION_TYPE 1
#define RDMA_CONNECTION_TYPE 2
#define ISCSI_CONNECTION_TYPE 3
#define FCOE_CONNECTION_TYPE 4
#define RESERVED_CONNECTION_TYPE_0 5
#define RESERVED_CONNECTION_TYPE_1 6
#define RESERVED_CONNECTION_TYPE_2 7
#define NONE_CONNECTION_TYPE 8
* This file defines HSI constants common to all microcode flows
*/
#define PROTOCOL_STATE_BIT_OFFSET 6
@ -391,25 +343,9 @@
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
/* values of command IDs in the ramrod message */
#define RAMROD_CMD_ID_COMMON_FUNCTION_START 1
#define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2
#define RAMROD_CMD_ID_COMMON_CFC_DEL 3
#define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4
#define RAMROD_CMD_ID_COMMON_SET_MAC 5
#define RAMROD_CMD_ID_COMMON_STAT_QUERY 6
#define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7
#define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8
/* microcode fixed page page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
/* Host coalescing constants */
#define HC_IGU_BC_MODE 0
#define HC_IGU_NBC_MODE 1
/* Host coalescing constants. E1 includes E1H as well */
/* Number of indices per slow-path SB */
#define HC_SP_SB_MAX_INDICES 16
@ -418,30 +354,17 @@
#define HC_SB_MAX_INDICES_E2 8
#define HC_SB_MAX_SB_E1X 32
#define HC_SB_MAX_SB_E2 136
#define HC_SB_MAX_SB_E2 136
#define HC_SP_SB_ID 0xde
#define HC_REGULAR_SEGMENT 0
#define HC_DEFAULT_SEGMENT 1
#define HC_SB_MAX_SM 2
#define HC_SB_MAX_DYNAMIC_INDICES 4
#define HC_FUNCTION_DISABLED 0xff
/* used by the driver to get the SB offset */
#define USTORM_ID 0
#define CSTORM_ID 1
#define XSTORM_ID 2
#define TSTORM_ID 3
#define ATTENTION_ID 4
/* max number of slow path commands per port */
#define MAX_RAMRODS_PER_PORT 8
/* values for RX ETH CQE type field */
#define RX_ETH_CQE_TYPE_ETH_FASTPATH 0
#define RX_ETH_CQE_TYPE_ETH_RAMROD 1
/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
@ -451,7 +374,7 @@
#define XSEMI_CLK1_RESUL_CHIP (1e-3)
#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
@ -460,72 +383,28 @@
#define FW_LOG_LIST_SIZE 50
#define NUM_OF_PROTOCOLS 4
#define NUM_OF_SAFC_BITS 16
#define MAX_COS_NUMBER 4
#define FAIRNESS_COS_WRR_MODE 0
#define FAIRNESS_COS_ETS_MODE 1
/* Priority Flow Control (PFC) */
#define MAX_TRAFFIC_TYPES 8
#define MAX_PFC_PRIORITIES 8
#define MAX_PFC_TRAFFIC_TYPES 8
/* Available Traffic Types for Link Layer Flow Control */
#define LLFC_TRAFFIC_TYPE_NW 0
#define LLFC_TRAFFIC_TYPE_FCOE 1
#define LLFC_TRAFFIC_TYPE_ISCSI 2
/***************** START OF E2 INTEGRATION \
CODE***************************************/
#define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3
/***************** END OF E2 INTEGRATION \
CODE***************************************/
#define LLFC_TRAFFIC_TYPE_MAX 4
/* used by array traffic_type_to_priority[] to mark traffic type \
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
#define LLFC_MODE_NONE 0
#define LLFC_MODE_PFC 1
#define LLFC_MODE_SAFC 2
#define DCB_DISABLED 0
#define DCB_ENABLED 1
#define UNKNOWN_ADDRESS 0
#define UNICAST_ADDRESS 1
#define MULTICAST_ADDRESS 2
#define BROADCAST_ADDRESS 3
#define SINGLE_FUNCTION 0
#define MULTI_FUNCTION_SD 1
#define MULTI_FUNCTION_SI 2
#define IP_V4 0
#define IP_V6 1
#define C_ERES_PER_PAGE \
(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
#define EVENT_RING_OPCODE_VF_PF_CHANNEL 0
#define EVENT_RING_OPCODE_FUNCTION_START 1
#define EVENT_RING_OPCODE_FUNCTION_STOP 2
#define EVENT_RING_OPCODE_CFC_DEL 3
#define EVENT_RING_OPCODE_CFC_DEL_WB 4
#define EVENT_RING_OPCODE_SET_MAC 5
#define EVENT_RING_OPCODE_STAT_QUERY 6
#define EVENT_RING_OPCODE_STOP_TRAFFIC 7
#define EVENT_RING_OPCODE_START_TRAFFIC 8
#define EVENT_RING_OPCODE_FORWARD_SETUP 9
#define STATS_QUERY_CMD_COUNT 16
#define VF_PF_CHANNEL_STATE_READY 0
#define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1
#define NIV_LIST_TABLE_SIZE 4096
#define VF_PF_CHANNEL_STATE_MAX_NUMBER 2
#define INVALID_VNIC_ID 0xFF
#define UNDEF_IRO 0x80000000
#endif /* BNX2X_FW_DEFS_H */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -15,98 +15,34 @@
#ifndef BNX2X_INIT_H
#define BNX2X_INIT_H
/* RAM0 size in bytes */
#define STORM_INTMEM_SIZE_E1 0x5800
#define STORM_INTMEM_SIZE_E1H 0x10000
#define STORM_INTMEM_SIZE(bp) ((CHIP_IS_E1(bp) ? STORM_INTMEM_SIZE_E1 : \
STORM_INTMEM_SIZE_E1H) / 4)
/* Init operation types and structures */
/* Common for both E1 and E1H */
#define OP_RD 0x1 /* read single register */
#define OP_WR 0x2 /* write single register */
#define OP_IW 0x3 /* write single register using mailbox */
#define OP_SW 0x4 /* copy a string to the device */
#define OP_SI 0x5 /* copy a string using mailbox */
#define OP_ZR 0x6 /* clear memory */
#define OP_ZP 0x7 /* unzip then copy with DMAE */
#define OP_WR_64 0x8 /* write 64 bit pattern */
#define OP_WB 0x9 /* copy a string using DMAE */
/* FPGA and EMUL specific operations */
#define OP_WR_EMUL 0xa /* write single register on Emulation */
#define OP_WR_FPGA 0xb /* write single register on FPGA */
#define OP_WR_ASIC 0xc /* write single register on ASIC */
/* Init stages */
/* Never reorder stages !!! */
#define COMMON_STAGE 0
#define PORT0_STAGE 1
#define PORT1_STAGE 2
#define FUNC0_STAGE 3
#define FUNC1_STAGE 4
#define FUNC2_STAGE 5
#define FUNC3_STAGE 6
#define FUNC4_STAGE 7
#define FUNC5_STAGE 8
#define FUNC6_STAGE 9
#define FUNC7_STAGE 10
#define STAGE_IDX_MAX 11
#define STAGE_START 0
#define STAGE_END 1
/* Indices of blocks */
#define PRS_BLOCK 0
#define SRCH_BLOCK 1
#define TSDM_BLOCK 2
#define TCM_BLOCK 3
#define BRB1_BLOCK 4
#define TSEM_BLOCK 5
#define PXPCS_BLOCK 6
#define EMAC0_BLOCK 7
#define EMAC1_BLOCK 8
#define DBU_BLOCK 9
#define MISC_BLOCK 10
#define DBG_BLOCK 11
#define NIG_BLOCK 12
#define MCP_BLOCK 13
#define UPB_BLOCK 14
#define CSDM_BLOCK 15
#define USDM_BLOCK 16
#define CCM_BLOCK 17
#define UCM_BLOCK 18
#define USEM_BLOCK 19
#define CSEM_BLOCK 20
#define XPB_BLOCK 21
#define DQ_BLOCK 22
#define TIMERS_BLOCK 23
#define XSDM_BLOCK 24
#define QM_BLOCK 25
#define PBF_BLOCK 26
#define XCM_BLOCK 27
#define XSEM_BLOCK 28
#define CDU_BLOCK 29
#define DMAE_BLOCK 30
#define PXP_BLOCK 31
#define CFC_BLOCK 32
#define HC_BLOCK 33
#define PXP2_BLOCK 34
#define MISC_AEU_BLOCK 35
#define PGLUE_B_BLOCK 36
#define IGU_BLOCK 37
#define ATC_BLOCK 38
#define QM_4PORT_BLOCK 39
#define XSEM_4PORT_BLOCK 40
enum {
OP_RD = 0x1, /* read a single register */
OP_WR, /* write a single register */
OP_SW, /* copy a string to the device */
OP_ZR, /* clear memory */
OP_ZP, /* unzip then copy with DMAE */
OP_WR_64, /* write 64 bit pattern */
OP_WB, /* copy a string using DMAE */
OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */
/* Skip the following ops if all of the init modes don't match */
OP_IF_MODE_OR,
/* Skip the following ops if any of the init modes don't match */
OP_IF_MODE_AND,
OP_MAX
};
enum {
STAGE_START,
STAGE_END,
};
/* Returns the index of start or end of a specific block stage in ops array*/
#define BLOCK_OPS_IDX(block, stage, end) \
(2*(((block)*STAGE_IDX_MAX) + (stage)) + (end))
(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
/* structs for the various opcodes */
struct raw_op {
u32 op:8;
u32 offset:24;
@ -116,7 +52,7 @@ struct raw_op {
struct op_read {
u32 op:8;
u32 offset:24;
u32 pad;
u32 val;
};
struct op_write {
@ -125,15 +61,15 @@ struct op_write {
u32 val;
};
struct op_string_write {
struct op_arr_write {
u32 op:8;
u32 offset:24;
#ifdef __LITTLE_ENDIAN
u16 data_off;
u16 data_len;
#else /* __BIG_ENDIAN */
#ifdef __BIG_ENDIAN
u16 data_len;
u16 data_off;
#else /* __LITTLE_ENDIAN */
u16 data_off;
u16 data_len;
#endif
};
@ -143,14 +79,210 @@ struct op_zero {
u32 len;
};
struct op_if_mode {
u32 op:8;
u32 cmd_offset:24;
u32 mode_bit_map;
};
union init_op {
struct op_read read;
struct op_write write;
struct op_string_write str_wr;
struct op_arr_write arr_wr;
struct op_zero zero;
struct raw_op raw;
struct op_if_mode if_mode;
};
/* Init Phases */
enum {
PHASE_COMMON,
PHASE_PORT0,
PHASE_PORT1,
PHASE_PF0,
PHASE_PF1,
PHASE_PF2,
PHASE_PF3,
PHASE_PF4,
PHASE_PF5,
PHASE_PF6,
PHASE_PF7,
NUM_OF_INIT_PHASES
};
/* Init Modes */
enum {
MODE_ASIC = 0x00000001,
MODE_FPGA = 0x00000002,
MODE_EMUL = 0x00000004,
MODE_E2 = 0x00000008,
MODE_E3 = 0x00000010,
MODE_PORT2 = 0x00000020,
MODE_PORT4 = 0x00000040,
MODE_SF = 0x00000080,
MODE_MF = 0x00000100,
MODE_MF_SD = 0x00000200,
MODE_MF_SI = 0x00000400,
MODE_MF_NIV = 0x00000800,
MODE_E3_A0 = 0x00001000,
MODE_E3_B0 = 0x00002000,
MODE_COS_BC = 0x00004000,
MODE_COS3 = 0x00008000,
MODE_COS6 = 0x00010000,
MODE_LITTLE_ENDIAN = 0x00020000,
MODE_BIG_ENDIAN = 0x00040000,
};
/* Init Blocks */
enum {
BLOCK_ATC,
BLOCK_BRB1,
BLOCK_CCM,
BLOCK_CDU,
BLOCK_CFC,
BLOCK_CSDM,
BLOCK_CSEM,
BLOCK_DBG,
BLOCK_DMAE,
BLOCK_DORQ,
BLOCK_HC,
BLOCK_IGU,
BLOCK_MISC,
BLOCK_NIG,
BLOCK_PBF,
BLOCK_PGLUE_B,
BLOCK_PRS,
BLOCK_PXP2,
BLOCK_PXP,
BLOCK_QM,
BLOCK_SRC,
BLOCK_TCM,
BLOCK_TM,
BLOCK_TSDM,
BLOCK_TSEM,
BLOCK_UCM,
BLOCK_UPB,
BLOCK_USDM,
BLOCK_USEM,
BLOCK_XCM,
BLOCK_XPB,
BLOCK_XSDM,
BLOCK_XSEM,
BLOCK_MISC_AEU,
NUM_OF_INIT_BLOCKS
};
/* QM queue numbers */
#define BNX2X_ETH_Q 0
#define BNX2X_TOE_Q 3
#define BNX2X_TOE_ACK_Q 6
#define BNX2X_ISCSI_Q 9
#define BNX2X_ISCSI_ACK_Q 8
#define BNX2X_FCOE_Q 10
/* Vnics per mode */
#define BNX2X_PORT2_MODE_NUM_VNICS 4
#define BNX2X_PORT4_MODE_NUM_VNICS 2
/* COS offset for port1 in E3 B0 4port mode */
#define BNX2X_E3B0_PORT1_COS_OFFSET 3
/* QM Register addresses */
#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
(QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
/* extracts the QM queue number for the specified port and vnic */
#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
((((port) << 1) | (vnic)) * 16 + (q_num))
/* Maps the specified queue to the specified COS */
static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
{
/* find current COS mapping */
u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
/* check if queue->COS mapping has changed */
if (curr_cos != new_cos) {
u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
u32 reg_addr, reg_bit_map, vnic;
/* update parameters for 4port mode */
if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
if (BP_PORT(bp)) {
curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
}
}
/* change queue mapping for each VNIC */
for (vnic = 0; vnic < num_vnics; vnic++) {
u32 pf_q_num =
BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
u32 q_bit_map = 1 << (pf_q_num & 0x1f);
/* overwrite queue->VOQ mapping */
REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
/* clear queue bit from current COS bit map */
reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
reg_bit_map = REG_RD(bp, reg_addr);
REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
/* set queue bit in new COS bit map */
reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
reg_bit_map = REG_RD(bp, reg_addr);
REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
/* set/clear queue bit in command-queue bit map
(E2/E3A0 only, valid COS values are 0/1) */
if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
reg_bit_map = REG_RD(bp, reg_addr);
q_bit_map = 1 << (2 * (pf_q_num & 0xf));
reg_bit_map = new_cos ?
(reg_bit_map | q_bit_map) :
(reg_bit_map & (~q_bit_map));
REG_WR(bp, reg_addr, reg_bit_map);
}
}
}
}
/* Configures the QM according to the specified per-traffic-type COSes */
static inline void bnx2x_dcb_config_qm(struct bnx2x *bp,
struct priority_cos *traffic_cos)
{
bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
if (INIT_MODE_FLAGS(bp) & MODE_COS_BC) {
/* required only in backward compatible COS mode */
bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
}
}
/* Returns the index of start or end of a specific block stage in ops array*/
#define BLOCK_OPS_IDX(block, stage, end) \
(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
#define INITOP_SET 0 /* set the HW directly */
#define INITOP_CLEAR 1 /* clear the HW directly */
#define INITOP_INIT 2 /* set the init-value array */

Просмотреть файл

@ -15,13 +15,39 @@
#ifndef BNX2X_INIT_OPS_H
#define BNX2X_INIT_OPS_H
#ifndef BP_ILT
#define BP_ILT(bp) NULL
#endif
#ifndef BP_FUNC
#define BP_FUNC(bp) 0
#endif
#ifndef BP_PORT
#define BP_PORT(bp) 0
#endif
#ifndef BNX2X_ILT_FREE
#define BNX2X_ILT_FREE(x, y, sz)
#endif
#ifndef BNX2X_ILT_ZALLOC
#define BNX2X_ILT_ZALLOC(x, y, sz)
#endif
#ifndef ILOG2
#define ILOG2(x) x
#endif
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len);
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
dma_addr_t phys_addr, u32 addr,
u32 len);
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
const u32 *data, u32 len)
{
u32 i;
@ -29,24 +55,32 @@ static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
REG_WR(bp, addr + i*4, data[i]);
}
static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
const u32 *data, u32 len)
{
u32 i;
for (i = 0; i < len; i++)
REG_WR_IND(bp, addr + i*4, data[i]);
bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
}
static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
u8 wb)
{
if (bp->dmae_ready)
bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
else if (wb)
/*
* Wide bus registers with no dmae need to be written
* using indirect write.
*/
bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
else
bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
u32 len, u8 wb)
{
u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
u32 buf_len32 = buf_len/4;
@ -57,12 +91,20 @@ static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
for (i = 0; i < len; i += buf_len32) {
u32 cur_len = min(buf_len32, len - i);
bnx2x_write_big_buf(bp, addr + i*4, cur_len);
bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
}
}
static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len64)
static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
{
if (bp->dmae_ready)
bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
else
bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
}
static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
const u32 *data, u32 len64)
{
u32 buf_len32 = FW_BUF_SIZE/4;
u32 len = len64*2;
@ -82,7 +124,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
for (i = 0; i < len; i += buf_len32) {
u32 cur_len = min(buf_len32, len - i);
bnx2x_write_big_buf(bp, addr + i*4, cur_len);
bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
}
}
@ -100,7 +142,8 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
#define IF_IS_PRAM_ADDR(base, addr) \
if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
const u8 *data)
{
IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
data = INIT_TSEM_INT_TABLE_DATA(bp);
@ -129,31 +172,17 @@ static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
return data;
}
static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
const u32 *data, u32 len)
{
if (bp->dmae_ready)
bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
else
bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
}
static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
{
const u32 *old_data = data;
data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
if (bp->dmae_ready) {
if (old_data != data)
VIRT_WR_DMAE_LEN(bp, data, addr, len, 1);
else
VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
} else
bnx2x_init_ind_wr(bp, addr, data, len);
}
static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
u32 val_hi)
{
u32 wb_write[2];
@ -161,8 +190,8 @@ static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
wb_write[1] = val_hi;
REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
}
static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
u32 blob_off)
{
const u8 *data = NULL;
int rc;
@ -186,39 +215,33 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
{
u16 op_start =
INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)];
INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
STAGE_START)];
u16 op_end =
INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
STAGE_END)];
union init_op *op;
int hw_wr;
u32 i, op_type, addr, len;
u32 op_idx, op_type, addr, len;
const u32 *data, *data_base;
/* If empty block */
if (op_start == op_end)
return;
if (CHIP_REV_IS_FPGA(bp))
hw_wr = OP_WR_FPGA;
else if (CHIP_REV_IS_EMUL(bp))
hw_wr = OP_WR_EMUL;
else
hw_wr = OP_WR_ASIC;
data_base = INIT_DATA(bp);
for (i = op_start; i < op_end; i++) {
for (op_idx = op_start; op_idx < op_end; op_idx++) {
op = (union init_op *)&(INIT_OPS(bp)[i]);
op_type = op->str_wr.op;
addr = op->str_wr.offset;
len = op->str_wr.data_len;
data = data_base + op->str_wr.data_off;
/* HW/EMUL specific */
if ((op_type > OP_WB) && (op_type == hw_wr))
op_type = OP_WR;
op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
/* Get generic data */
op_type = op->raw.op;
addr = op->raw.offset;
/* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
* OP_WR64 (we assume that op_arr_write and op_write have the
* same structure).
*/
len = op->arr_wr.data_len;
data = data_base + op->arr_wr.data_off;
switch (op_type) {
case OP_RD:
@ -233,21 +256,39 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
case OP_WB:
bnx2x_init_wr_wb(bp, addr, data, len);
break;
case OP_SI:
bnx2x_init_ind_wr(bp, addr, data, len);
break;
case OP_ZR:
bnx2x_init_fill(bp, addr, 0, op->zero.len);
bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
break;
case OP_WB_ZR:
bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
break;
case OP_ZP:
bnx2x_init_wr_zp(bp, addr, len,
op->str_wr.data_off);
op->arr_wr.data_off);
break;
case OP_WR_64:
bnx2x_init_wr_64(bp, addr, data, len);
break;
case OP_IF_MODE_AND:
/* if any of the flags doesn't match, skip the
* conditional block.
*/
if ((INIT_MODE_FLAGS(bp) &
op->if_mode.mode_bit_map) !=
op->if_mode.mode_bit_map)
op_idx += op->if_mode.cmd_offset;
break;
case OP_IF_MODE_OR:
/* if all the flags don't match, skip the conditional
* block.
*/
if ((INIT_MODE_FLAGS(bp) &
op->if_mode.mode_bit_map) == 0)
op_idx += op->if_mode.cmd_offset;
break;
default:
/* happens whenever an op is of a diff HW */
/* Should never get here! */
break;
}
}
@ -417,7 +458,8 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
PXP2_REG_RQ_BW_WR_UBOUND30}
};
static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
int w_order)
{
u32 val, i;
@ -491,19 +533,21 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
if (CHIP_IS_E2(bp))
if (CHIP_IS_E3(bp))
REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
else if (CHIP_IS_E2(bp))
REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
else
REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) {
if (!CHIP_IS_E1(bp)) {
/* MPS w_order optimal TH presently TH
* 128 0 0 2
* 256 1 1 3
* >=512 2 2 3
*/
/* DMAE is special */
if (CHIP_IS_E2(bp)) {
if (!CHIP_IS_E1H(bp)) {
/* E2 can use optimal TH */
val = w_order;
REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
@ -557,8 +601,8 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
#define ILT_RANGE(f, l) (((l) << 10) | f)
static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
u32 size, u8 memop)
static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
struct ilt_line *line, u32 size, u8 memop)
{
if (memop == ILT_MEMOP_FREE) {
BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
@ -572,7 +616,8 @@ static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
}
static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
u8 memop)
{
int i, rc;
struct bnx2x_ilt *ilt = BP_ILT(bp);
@ -617,8 +662,8 @@ static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
}
static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
int idx, u8 initop)
static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
struct bnx2x_ilt *ilt, int idx, u8 initop)
{
dma_addr_t null_mapping;
int abs_idx = ilt->start_line + idx;
@ -733,7 +778,7 @@ static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
}
static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
u32 psz_reg, u8 initop)
u32 psz_reg, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
@ -848,7 +893,8 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
/* Initialize T2 */
for (i = 0; i < src_cid_count-1; i++)
t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent));
t2[i].next = (u64)(t2_mapping +
(i+1)*sizeof(struct src_ent));
/* tell the searcher where the T2 table is */
REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);

Просмотреть файл

@ -25,6 +25,8 @@
#include <linux/mutex.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
/********************************************************/
#define ETH_HLEN 14
@ -874,6 +876,54 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
}
}
/******************************************************************************
* Description:
* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
******************************************************************************/
int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
u8 cos_entry,
u32 priority_mask, u8 port)
{
u32 nig_reg_rx_priority_mask_add = 0;
switch (cos_entry) {
case 0:
nig_reg_rx_priority_mask_add = (port) ?
NIG_REG_P1_RX_COS0_PRIORITY_MASK :
NIG_REG_P0_RX_COS0_PRIORITY_MASK;
break;
case 1:
nig_reg_rx_priority_mask_add = (port) ?
NIG_REG_P1_RX_COS1_PRIORITY_MASK :
NIG_REG_P0_RX_COS1_PRIORITY_MASK;
break;
case 2:
nig_reg_rx_priority_mask_add = (port) ?
NIG_REG_P1_RX_COS2_PRIORITY_MASK :
NIG_REG_P0_RX_COS2_PRIORITY_MASK;
break;
case 3:
if (port)
return -EINVAL;
nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
break;
case 4:
if (port)
return -EINVAL;
nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
break;
case 5:
if (port)
return -EINVAL;
nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
break;
}
REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
return 0;
}
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
@ -958,15 +1008,12 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val);
if (nig_params) {
u8 i = 0;
pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK :
NIG_REG_P0_RX_COS0_PRIORITY_MASK,
nig_params->rx_cos0_priority_mask);
REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK :
NIG_REG_P0_RX_COS1_PRIORITY_MASK,
nig_params->rx_cos1_priority_mask);
for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
bnx2x_pfc_nig_rx_priority_mask(bp, i,
nig_params->rx_cos_priority_mask[i], port);
REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
@ -1824,26 +1871,6 @@ void bnx2x_link_status_update(struct link_params *params,
vars->line_speed = SPEED_10000;
break;
case LINK_12GTFD:
vars->line_speed = SPEED_12000;
break;
case LINK_12_5GTFD:
vars->line_speed = SPEED_12500;
break;
case LINK_13GTFD:
vars->line_speed = SPEED_13000;
break;
case LINK_15GTFD:
vars->line_speed = SPEED_15000;
break;
case LINK_16GTFD:
vars->line_speed = SPEED_16000;
break;
default:
break;
}
@ -2667,31 +2694,6 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
vars->link_status |= LINK_10GTFD;
break;
case GP_STATUS_12G_HIG:
new_line_speed = SPEED_12000;
vars->link_status |= LINK_12GTFD;
break;
case GP_STATUS_12_5G:
new_line_speed = SPEED_12500;
vars->link_status |= LINK_12_5GTFD;
break;
case GP_STATUS_13G:
new_line_speed = SPEED_13000;
vars->link_status |= LINK_13GTFD;
break;
case GP_STATUS_15G:
new_line_speed = SPEED_15000;
vars->link_status |= LINK_15GTFD;
break;
case GP_STATUS_16G:
new_line_speed = SPEED_16000;
vars->link_status |= LINK_16GTFD;
break;
default:
DP(NETIF_MSG_LINK,
"link speed unsupported gp_status 0x%x\n",

Просмотреть файл

@ -81,6 +81,7 @@
#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
/***********************************************************/
/* Structs */
/***********************************************************/
@ -262,6 +263,8 @@ struct link_vars {
#define MAC_TYPE_NONE 0
#define MAC_TYPE_EMAC 1
#define MAC_TYPE_BMAC 2
#define MAC_TYPE_UMAC 3
#define MAC_TYPE_XMAC 4
u8 phy_link_up; /* internal phy link indication */
u8 link_up;
@ -363,6 +366,20 @@ int bnx2x_phy_probe(struct link_params *params);
u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 port);
/* DCBX structs */
/* Number of maximum COS per chip */
#define DCBX_E2E3_MAX_NUM_COS (2)
#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6)
#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3)
#define DCBX_E3B0_MAX_NUM_COS ( \
MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
DCBX_E3B0_MAX_NUM_COS_PORT1))
#define DCBX_MAX_NUM_COS ( \
MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
DCBX_E2E3_MAX_NUM_COS))
/* PFC port configuration params */
struct bnx2x_nig_brb_pfc_port_params {
/* NIG */
@ -370,8 +387,8 @@ struct bnx2x_nig_brb_pfc_port_params {
u32 llfc_out_en;
u32 llfc_enable;
u32 pkt_priority_to_cos;
u32 rx_cos0_priority_mask;
u32 rx_cos1_priority_mask;
u8 num_of_rx_cos_priority_mask;
u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
u32 llfc_high_priority_classes;
u32 llfc_low_priority_classes;
/* BRB */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -422,6 +422,7 @@
#define CFC_REG_NUM_LCIDS_ALLOC 0x104020
/* [R 9] Number of Arriving LCIDs in Link List Block */
#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120
/* [R 9] Number of Leaving LCIDs in Link List Block */
#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
#define CFC_REG_WEAK_ENABLE_PF 0x104124
@ -783,6 +784,7 @@
/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
Interface. */
#define DORQ_REG_OUTST_REQ 0x17003c
#define DORQ_REG_PF_USAGE_CNT 0x1701d0
#define DORQ_REG_REGN 0x170038
/* [R 4] Current value of response A counter credit. Initial credit is
configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
@ -1645,6 +1647,17 @@
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
* not it is the recipient of the message on the MDIO interface. The value
* is compared to the value on ctrl_md_devad. Drives output
* misc_xgxs0_phy_addr. Global register. */
#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
/* [RW 32] 1 [47] Packet Size = 64 Write to this register write bits 31:0.
* Reads from this register will clear bits 31:0. */
#define MSTAT_REG_RX_STAT_GR64_LO 0x200
/* [RW 32] 1 [00] Tx Good Packet Count Write to this register write bits
* 31:0. Reads from this register will clear bits 31:0. */
#define MSTAT_REG_TX_STAT_GTXPOK_LO 0
#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
@ -1838,6 +1851,10 @@
#define NIG_REG_LLH1_FUNC_MEM 0x161c0
#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
/* [RW 1] When this bit is set; the LLH will classify the packet before
* sending it to the BRB or calculating WoL on it. This bit controls port 1
* only. The legacy llh_multi_function_mode bit controls port 0. */
#define NIG_REG_LLH1_MF_MODE 0x18614
/* [RW 8] init credit counter for port1 in LLH */
#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
#define NIG_REG_LLH1_XCM_MASK 0x10134
@ -1889,6 +1906,26 @@
* than one bit may be set; allowing multiple priorities to be mapped to one
* COS. */
#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
* priority is mapped to COS 2 when the corresponding mask bit is 1. More
* than one bit may be set; allowing multiple priorities to be mapped to one
* COS. */
#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0
/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
* priority is mapped to COS 3 when the corresponding mask bit is 1. More
* than one bit may be set; allowing multiple priorities to be mapped to one
* COS. */
#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4
/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
* priority is mapped to COS 4 when the corresponding mask bit is 1. More
* than one bit may be set; allowing multiple priorities to be mapped to one
* COS. */
#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8
/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
* priority is mapped to COS 5 when the corresponding mask bit is 1. More
* than one bit may be set; allowing multiple priorities to be mapped to one
* COS. */
#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc
/* [RW 15] Specify which of the credit registers the client is to be mapped
* to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
* clients that are not subject to WFQ credit blocking - their
@ -1926,6 +1963,9 @@
* for management at priority 0; debug traffic at priorities 1 and 2; COS0
* traffic at priority 3; and COS1 traffic at priority 4. */
#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
* Ethernet header. */
#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c
#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
@ -1944,6 +1984,11 @@
* than one bit may be set; allowing multiple priorities to be mapped to one
* COS. */
#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
* priority is mapped to COS 2 when the corresponding mask bit is 1. More
* than one bit may be set; allowing multiple priorities to be mapped to one
* COS. */
#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8
/* [RW 1] Pause enable for port0. This register may get 1 only when
~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
port */
@ -2033,6 +2078,15 @@
#define PBF_REG_COS1_UPPER_BOUND 0x15c060
/* [RW 31] The weight of COS1 in the ETS command arbiter. */
#define PBF_REG_COS1_WEIGHT 0x15c058
/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
* lines. */
#define PBF_REG_CREDIT_LB_Q 0x140338
/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
* lines. */
#define PBF_REG_CREDIT_Q0 0x14033c
/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
* lines. */
#define PBF_REG_CREDIT_Q1 0x140340
/* [RW 1] Disable processing further tasks from port 0 (after ending the
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
@ -2050,14 +2104,25 @@
/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
* Ethernet header. */
#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
/* [RW 1] Indicates which COS is conncted to the highest priority in the
* command arbiter. */
/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8
/* [R 1] Removed for E3 B0 - Indicates which COS is conncted to the highest
* priority in the command arbiter. */
#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
#define PBF_REG_IF_ENABLE_REG 0x140044
/* [RW 1] Init bit. When set the initial credits are copied to the credit
registers (except the port credits). Should be set and then reset after
the configuration of the block has ended. */
#define PBF_REG_INIT 0x140000
/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
* lines. */
#define PBF_REG_INIT_CRD_LB_Q 0x15c248
/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
* lines. */
#define PBF_REG_INIT_CRD_Q0 0x15c230
/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
* lines. */
#define PBF_REG_INIT_CRD_Q1 0x15c234
/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
copied to the credit register. Should be set and then reset after the
configuration of the port has ended. */
@ -2070,6 +2135,15 @@
copied to the credit register. Should be set and then reset after the
configuration of the port has ended. */
#define PBF_REG_INIT_P4 0x14000c
/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
* the LB queue. Reset upon init. */
#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354
/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
* queue 0. Reset upon init. */
#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358
/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
* queue 1. Reset upon init. */
#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c
/* [RW 1] Enable for mac interface 0. */
#define PBF_REG_MAC_IF0_ENABLE 0x140030
/* [RW 1] Enable for mac interface 1. */
@ -2090,24 +2164,49 @@
/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
lines. */
#define PBF_REG_P0_INIT_CRD 0x1400d0
/* [RW 1] Indication that pause is enabled for port 0. */
#define PBF_REG_P0_PAUSE_ENABLE 0x140014
/* [R 8] Number of tasks in port 0 task queue. */
/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
* port 0. Reset upon init. */
#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308
/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
#define PBF_REG_P0_PAUSE_ENABLE 0x140014
/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
#define PBF_REG_P0_TASK_CNT 0x140204
/* [R 11] Current credit for port 1 in the tx port buffers in 16 byte lines. */
/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
* freed from the task queue of port 0. Reset upon init. */
#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0
/* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */
#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc
/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
* buffers in 16 byte lines. */
#define PBF_REG_P1_CREDIT 0x140208
/* [RW 11] Initial credit for port 1 in the tx port buffers in 16 byte
lines. */
/* [R 11] Removed for E3 B0 - Initial credit for port 0 in the tx port
* buffers in 16 byte lines. */
#define PBF_REG_P1_INIT_CRD 0x1400d4
/* [R 8] Number of tasks in port 1 task queue. */
/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
* port 1. Reset upon init. */
#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c
/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
#define PBF_REG_P1_TASK_CNT 0x14020c
/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
* freed from the task queue of port 1. Reset upon init. */
#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4
/* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */
#define PBF_REG_P1_TQ_OCCUPANCY 0x140300
/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
#define PBF_REG_P4_CREDIT 0x140210
/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
lines. */
#define PBF_REG_P4_INIT_CRD 0x1400e0
/* [R 8] Number of tasks in port 4 task queue. */
/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
* port 4. Reset upon init. */
#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310
/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
#define PBF_REG_P4_TASK_CNT 0x140214
/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
* freed from the task queue of port 4. Reset upon init. */
#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8
/* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */
#define PBF_REG_P4_TQ_OCCUPANCY 0x140304
/* [RW 5] Interrupt mask register #0 read/write */
#define PBF_REG_PBF_INT_MASK 0x1401d4
/* [R 5] Interrupt register #0 read */
@ -2116,6 +2215,27 @@
#define PBF_REG_PBF_PRTY_MASK 0x1401e4
/* [RC 20] Parity register #0 read clear */
#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
/* [RW 16] The Ethernet type value for L2 tag 0 */
#define PBF_REG_TAG_ETHERTYPE_0 0x15c090
/* [RW 4] The length of the info field for L2 tag 0. The length is between
* 2B and 14B; in 2B granularity */
#define PBF_REG_TAG_LEN_0 0x15c09c
/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
* queue. Reset upon init. */
#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c
/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
* queue 0. Reset upon init. */
#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390
/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
* Reset upon init. */
#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394
/* [R 13] Number of 8 bytes lines occupied in the task queue of the LB
* queue. */
#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8
/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */
#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
#define PB_REG_CONTROL 0
/* [RW 2] Interrupt mask register #0 read/write */
#define PB_REG_PB_INT_MASK 0x28
@ -2445,10 +2565,24 @@
/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
* Ethernet header. */
#define PRS_REG_HDRS_AFTER_BASIC 0x40238
/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
* Ethernet header for port 0 packets. */
#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270
#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290
/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
#define PRS_REG_HDRS_AFTER_TAG_0 0x40248
/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
* port 0 packets */
#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280
#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0
/* [RW 4] The increment value to send in the CFC load request message */
#define PRS_REG_INC_VALUE 0x40048
/* [RW 6] Bit-map indicating which headers must appear in the packet */
#define PRS_REG_MUST_HAVE_HDRS 0x40254
/* [RW 6] Bit-map indicating which headers must appear in the packet for
* port 0 packets */
#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c
#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac
#define PRS_REG_NIC_MODE 0x40138
/* [RW 8] The 8-bit event ID for cases where there is no match on the
connection. Used in packet start message to TCM. */
@ -2497,6 +2631,11 @@
#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158
/* [R 4] debug only: SRC current credit. Transaction based. */
#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c
/* [RW 16] The Ethernet type value for L2 tag 0 */
#define PRS_REG_TAG_ETHERTYPE_0 0x401d4
/* [RW 4] The length of the info field for L2 tag 0. The length is between
* 2B and 14B; in 2B granularity */
#define PRS_REG_TAG_LEN_0 0x4022c
/* [R 8] debug only: TCM current credit. Cycle based. */
#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
/* [R 8] debug only: TSDM current credit. Transaction based. */
@ -3081,6 +3220,7 @@
#define QM_REG_BYTECREDITAFULLTHR 0x168094
/* [RW 4] The initial credit for interface */
#define QM_REG_CMINITCRD_0 0x1680cc
#define QM_REG_BYTECRDCMDQ_0 0x16e6e8
#define QM_REG_CMINITCRD_1 0x1680d0
#define QM_REG_CMINITCRD_2 0x1680d4
#define QM_REG_CMINITCRD_3 0x1680d8
@ -3171,7 +3311,10 @@
/* [RW 2] The PCI attributes field used in the PCI request. */
#define QM_REG_PCIREQAT 0x168054
#define QM_REG_PF_EN 0x16e70c
/* [R 16] The byte credit of port 0 */
/* [R 24] The number of tasks stored in the QM for the PF. only even
* functions are valid in E2 (odd I registers will be hard wired to 0) */
#define QM_REG_PF_USG_CNT_0 0x16e040
/* [R 16] NOT USED */
#define QM_REG_PORT0BYTECRD 0x168300
/* [R 16] The byte credit of port 1 */
#define QM_REG_PORT1BYTECRD 0x168304
@ -3783,6 +3926,8 @@
#define TM_REG_LIN0_LOGIC_ADDR 0x164240
/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048
/* [ST 16] Linear0 Number of scans counter. */
#define TM_REG_LIN0_NUM_SCANS 0x1640a0
/* [WB 64] Linear0 phy address. */
#define TM_REG_LIN0_PHY_ADDR 0x164270
/* [RW 1] Linear0 physical address valid. */
@ -3790,6 +3935,7 @@
#define TM_REG_LIN0_SCAN_ON 0x1640d0
/* [RW 24] Linear0 array scan timeout. */
#define TM_REG_LIN0_SCAN_TIME 0x16403c
#define TM_REG_LIN0_VNIC_UC 0x164128
/* [RW 32] Linear1 logic address. */
#define TM_REG_LIN1_LOGIC_ADDR 0x164250
/* [WB 64] Linear1 phy address. */
@ -4845,8 +4991,10 @@
#define XSDM_REG_NUM_OF_Q8_CMD 0x166264
/* [ST 32] The number of commands received in queue 9 */
#define XSDM_REG_NUM_OF_Q9_CMD 0x166268
/* [RW 13] The start address in the internal RAM for queue counters */
#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010
/* [W 17] Generate an operation after completion; bit-16 is
* AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
* bits 4:0 are the T124Param[4:0] */
#define XSDM_REG_OPERATION_GEN 0x1664c4
/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548
/* [R 1] parser fifo empty in sdm_sync block */
@ -5129,6 +5277,8 @@
#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25)
#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
@ -5161,6 +5311,7 @@
#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
#define MISC_REGISTERS_SPIO_SET_POS 8
#define HW_LOCK_DRV_FLAGS 10
#define HW_LOCK_MAX_RESOURCE_VALUE 31
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
@ -5168,7 +5319,6 @@
#define HW_LOCK_RESOURCE_RESERVED_08 8
#define HW_LOCK_RESOURCE_SPIO 2
#define HW_LOCK_RESOURCE_UNDI 5
#define PRS_FLAG_OVERETH_IPV4 1
#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
@ -5320,6 +5470,8 @@
#define GRCBASE_PXP2 0x120000
#define GRCBASE_PBF 0x140000
#define GRCBASE_XPB 0x161000
#define GRCBASE_MSTAT0 0x162000
#define GRCBASE_MSTAT1 0x162800
#define GRCBASE_TIMERS 0x164000
#define GRCBASE_XSDM 0x166000
#define GRCBASE_QM 0x168000
@ -6243,11 +6395,6 @@ Theotherbitsarereservedandshouldbezero*/
#define IGU_ADDR_MSI_ADDR_HI 0x0212
#define IGU_ADDR_MSI_DATA 0x0213
#define IGU_INT_ENABLE 0
#define IGU_INT_DISABLE 1
#define IGU_INT_NOP 2
#define IGU_INT_NOP2 3
#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
@ -6318,15 +6465,6 @@ Theotherbitsarereservedandshouldbezero*/
#define IGU_BC_BASE_DSB_PROD 128
#define IGU_NORM_BASE_DSB_PROD 136
#define IGU_CTRL_CMD_TYPE_WR\
1
#define IGU_CTRL_CMD_TYPE_RD\
0
#define IGU_SEG_ACCESS_NORM 0
#define IGU_SEG_ACCESS_DEF 1
#define IGU_SEG_ACCESS_ATTN 2
/* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
[5:2] = 0; [1:0] = PF number) */
#define IGU_FID_ENCODE_IS_PF (0x1<<6)

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -14,48 +14,11 @@
* Statistics and Link management by Yitchak Gertner
*
*/
#ifndef BNX2X_STATS_H
#define BNX2X_STATS_H
#include <linux/types.h>
struct bnx2x_eth_q_stats {
u32 total_bytes_received_hi;
u32 total_bytes_received_lo;
u32 total_bytes_transmitted_hi;
u32 total_bytes_transmitted_lo;
u32 total_unicast_packets_received_hi;
u32 total_unicast_packets_received_lo;
u32 total_multicast_packets_received_hi;
u32 total_multicast_packets_received_lo;
u32 total_broadcast_packets_received_hi;
u32 total_broadcast_packets_received_lo;
u32 total_unicast_packets_transmitted_hi;
u32 total_unicast_packets_transmitted_lo;
u32 total_multicast_packets_transmitted_hi;
u32 total_multicast_packets_transmitted_lo;
u32 total_broadcast_packets_transmitted_hi;
u32 total_broadcast_packets_transmitted_lo;
u32 valid_bytes_received_hi;
u32 valid_bytes_received_lo;
u32 error_bytes_received_hi;
u32 error_bytes_received_lo;
u32 etherstatsoverrsizepkts_hi;
u32 etherstatsoverrsizepkts_lo;
u32 no_buff_discard_hi;
u32 no_buff_discard_lo;
u32 driver_xoff;
u32 rx_err_discard_pkt;
u32 rx_skb_alloc_failed;
u32 hw_csum_err;
};
#define Q_STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
struct nig_stats {
u32 brb_discard;
u32 brb_packet;
@ -212,7 +175,7 @@ struct bnx2x_eth_stats {
u32 brb_truncate_lo;
u32 mac_filter_discard;
u32 xxoverflow_discard;
u32 mf_tag_discard;
u32 brb_truncate_discard;
u32 mac_discard;
@ -222,16 +185,197 @@ struct bnx2x_eth_stats {
u32 hw_csum_err;
u32 nig_timer_max;
/* TPA */
u32 total_tpa_aggregations_hi;
u32 total_tpa_aggregations_lo;
u32 total_tpa_aggregated_frames_hi;
u32 total_tpa_aggregated_frames_lo;
u32 total_tpa_bytes_hi;
u32 total_tpa_bytes_lo;
};
#define STATS_OFFSET32(stat_name) \
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
/* Forward declaration */
struct bnx2x_eth_q_stats {
u32 total_unicast_bytes_received_hi;
u32 total_unicast_bytes_received_lo;
u32 total_broadcast_bytes_received_hi;
u32 total_broadcast_bytes_received_lo;
u32 total_multicast_bytes_received_hi;
u32 total_multicast_bytes_received_lo;
u32 total_bytes_received_hi;
u32 total_bytes_received_lo;
u32 total_unicast_bytes_transmitted_hi;
u32 total_unicast_bytes_transmitted_lo;
u32 total_broadcast_bytes_transmitted_hi;
u32 total_broadcast_bytes_transmitted_lo;
u32 total_multicast_bytes_transmitted_hi;
u32 total_multicast_bytes_transmitted_lo;
u32 total_bytes_transmitted_hi;
u32 total_bytes_transmitted_lo;
u32 total_unicast_packets_received_hi;
u32 total_unicast_packets_received_lo;
u32 total_multicast_packets_received_hi;
u32 total_multicast_packets_received_lo;
u32 total_broadcast_packets_received_hi;
u32 total_broadcast_packets_received_lo;
u32 total_unicast_packets_transmitted_hi;
u32 total_unicast_packets_transmitted_lo;
u32 total_multicast_packets_transmitted_hi;
u32 total_multicast_packets_transmitted_lo;
u32 total_broadcast_packets_transmitted_hi;
u32 total_broadcast_packets_transmitted_lo;
u32 valid_bytes_received_hi;
u32 valid_bytes_received_lo;
u32 etherstatsoverrsizepkts_hi;
u32 etherstatsoverrsizepkts_lo;
u32 no_buff_discard_hi;
u32 no_buff_discard_lo;
u32 driver_xoff;
u32 rx_err_discard_pkt;
u32 rx_skb_alloc_failed;
u32 hw_csum_err;
u32 total_packets_received_checksum_discarded_hi;
u32 total_packets_received_checksum_discarded_lo;
u32 total_packets_received_ttl0_discarded_hi;
u32 total_packets_received_ttl0_discarded_lo;
u32 total_transmitted_dropped_packets_error_hi;
u32 total_transmitted_dropped_packets_error_lo;
/* TPA */
u32 total_tpa_aggregations_hi;
u32 total_tpa_aggregations_lo;
u32 total_tpa_aggregated_frames_hi;
u32 total_tpa_aggregated_frames_lo;
u32 total_tpa_bytes_hi;
u32 total_tpa_bytes_lo;
};
/****************************************************************************
* Macros
****************************************************************************/
/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
do { \
s_lo += a_lo; \
s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
} while (0)
/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
do { \
if (m_lo < s_lo) { \
/* underflow */ \
d_hi = m_hi - s_hi; \
if (d_hi > 0) { \
/* we can 'loan' 1 */ \
d_hi--; \
d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
} else { \
/* m_hi <= s_hi */ \
d_hi = 0; \
d_lo = 0; \
} \
} else { \
/* m_lo >= s_lo */ \
if (m_hi < s_hi) { \
d_hi = 0; \
d_lo = 0; \
} else { \
/* m_hi >= s_hi */ \
d_hi = m_hi - s_hi; \
d_lo = m_lo - s_lo; \
} \
} \
} while (0)
#define UPDATE_STAT64(s, t) \
do { \
DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
pstats->mac_stx[0].t##_hi = new->s##_hi; \
pstats->mac_stx[0].t##_lo = new->s##_lo; \
ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
pstats->mac_stx[1].t##_lo, diff.lo); \
} while (0)
#define UPDATE_STAT64_NIG(s, t) \
do { \
DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
diff.lo, new->s##_lo, old->s##_lo); \
ADD_64(estats->t##_hi, diff.hi, \
estats->t##_lo, diff.lo); \
} while (0)
/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
do { \
s_lo += a; \
s_hi += (s_lo < a) ? 1 : 0; \
} while (0)
#define ADD_STAT64(diff, t) \
do { \
ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
pstats->mac_stx[1].t##_lo, new->diff##_lo); \
} while (0)
#define UPDATE_EXTEND_STAT(s) \
do { \
ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
pstats->mac_stx[1].s##_lo, \
new->s); \
} while (0)
#define UPDATE_EXTEND_TSTAT(s, t) \
do { \
diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
old_tclient->s = tclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
#define UPDATE_EXTEND_USTAT(s, t) \
do { \
diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
old_uclient->s = uclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
#define UPDATE_EXTEND_XSTAT(s, t) \
do { \
diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
old_xclient->s = xclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
do { \
DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
} while (0)
/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
do { \
SUB_64(m_hi, 0, m_lo, s); \
} while (0)
#define SUB_EXTEND_USTAT(s, t) \
do { \
diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
/* forward */
struct bnx2x;
void bnx2x_stats_init(struct bnx2x *bp);
extern const u32 dmae_reg_go_c[];
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
#endif /* BNX2X_STATS_H */

Просмотреть файл

@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2010 Broadcom Corporation
* Copyright (c) 2006-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -836,7 +836,6 @@ static void cnic_free_resc(struct cnic_dev *dev)
cp->ctx_blks = 0;
cnic_free_dma(dev, &cp->gbl_buf_info);
cnic_free_dma(dev, &cp->conn_buf_info);
cnic_free_dma(dev, &cp->kwq_info);
cnic_free_dma(dev, &cp->kwq_16_data_info);
cnic_free_dma(dev, &cp->kcq2.dma);
@ -1176,7 +1175,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->iscsi_start_cid = start_cid;
cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
cp->fcoe_init_cid = ethdev->fcoe_init_cid;
if (!cp->fcoe_init_cid)
@ -1232,18 +1231,12 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
ret = cnic_alloc_kcq(dev, &cp->kcq2, false);
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
if (ret)
goto error;
}
pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
if (ret)
goto error;
pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
if (ret)
@ -1610,6 +1603,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
struct iscsi_context *ictx;
struct regpair context_addr;
int i, j, n = 2, n_max;
u8 port = CNIC_PORT(cp);
ctx->ctx_flags = 0;
if (!req2->num_additional_wqes)
@ -1661,6 +1655,17 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
ictx->xstorm_st_context.iscsi.flags.flags |=
XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
ETH_P_8021Q;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
cp->port_mode == CHIP_2_PORT_MODE) {
port = 0;
}
ictx->xstorm_st_context.common.flags =
1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
ictx->xstorm_st_context.common.flags =
port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
/* TSTORM requires the base address of RQ DB & not PTE */
@ -1876,8 +1881,11 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
hw_cid, NONE_CONNECTION_TYPE, &l5_data);
if (ret == 0)
if (ret == 0) {
wait_event(ctx->waitq, ctx->wait_cond);
if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
return -EBUSY;
}
return ret;
}
@ -1912,8 +1920,10 @@ static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
skip_cfc_delete:
cnic_free_bnx2x_conn_resc(dev, l5_cid);
atomic_dec(&cp->iscsi_conn);
clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
if (!ret) {
atomic_dec(&cp->iscsi_conn);
clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
}
destroy_reply:
memset(&kcqe, 0, sizeof(kcqe));
@ -1972,8 +1982,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
tstorm_buf->ka_interval = kwqe3->ka_interval;
tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
}
tstorm_buf->rcv_buf = kwqe3->rcv_buf;
tstorm_buf->snd_buf = kwqe3->snd_buf;
tstorm_buf->max_rt_time = 0xffffffff;
}
@ -2002,15 +2010,14 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[4]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[2]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
mac[1]);
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[0]);
}
@ -2189,7 +2196,7 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
memset(fcoe_stat, 0, sizeof(*fcoe_stat));
memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
@ -2234,12 +2241,9 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
fcoe_init->eq_next_page_addr.lo =
cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
fcoe_init->eq_next_page_addr.hi =
(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
fcoe_init->sb_num = cp->status_blk_num;
fcoe_init->eq_prod = MAX_KCQ_IDX;
@ -2247,7 +2251,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
cp->kcq2.sw_prod_idx = 0;
cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
FCOE_CONNECTION_TYPE, &l5_data);
*work = 3;
return ret;
@ -2463,7 +2467,7 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
@ -2544,7 +2548,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2 is down */
if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
return -EINVAL;
for (i = 0; i < num_wqes; ) {
@ -2935,7 +2939,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
CNIC_WR16(dev, cp->kcq1.io_addr,
cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
status_idx, IGU_INT_ENABLE, 1);
break;
@ -3054,13 +3058,21 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
break;
}
case CNIC_CTL_COMPLETION_CMD: {
u32 cid = BNX2X_SW_CID(info->data.comp.cid);
struct cnic_ctl_completion *comp = &info->data.comp;
u32 cid = BNX2X_SW_CID(comp->cid);
u32 l5_cid;
struct cnic_local *cp = dev->cnic_priv;
if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
if (unlikely(comp->error)) {
set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
netdev_err(dev->netdev,
"CID %x CFC delete comp error %x\n",
cid, comp->error);
}
ctx->wait_cond = 1;
wake_up(&ctx->waitq);
}
@ -3935,10 +3947,17 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
for (i = 0; i < cp->max_cid_space; i++) {
struct cnic_context *ctx = &cp->ctx_tbl[i];
int j;
while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
msleep(10);
for (j = 0; j < 5; j++) {
if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
break;
msleep(20);
}
if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
netdev_warn(dev->netdev, "CID %x not deleted\n",
ctx->cid);
@ -4005,6 +4024,7 @@ static void cnic_delete_task(struct work_struct *work)
for (i = 0; i < cp->max_cid_space; i++) {
struct cnic_context *ctx = &cp->ctx_tbl[i];
int err;
if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
!test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
@ -4018,13 +4038,15 @@ static void cnic_delete_task(struct work_struct *work)
if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
continue;
cnic_bnx2x_destroy_ramrod(dev, i);
err = cnic_bnx2x_destroy_ramrod(dev, i);
cnic_free_bnx2x_conn_resc(dev, i);
if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
atomic_dec(&cp->iscsi_conn);
if (!err) {
if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
atomic_dec(&cp->iscsi_conn);
clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
}
}
if (need_resched)
@ -4620,7 +4642,7 @@ static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
offsetof(struct hc_status_block_data_e1x, index_data) +
sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
offsetof(struct hc_index_data, timeout), 64 / 12);
offsetof(struct hc_index_data, timeout), 64 / 4);
cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}
@ -4636,7 +4658,6 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
dma_addr_t buf_map, ring_map = udev->l2_ring_map;
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int port = CNIC_PORT(cp);
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
@ -4677,10 +4698,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
/* reset xstorm per client statistics */
if (cli < MAX_STAT_COUNTER_ID) {
val = BAR_XSTRORM_INTMEM +
XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0);
data->general.statistics_zero_flg = 1;
data->general.statistics_en_flg = 1;
data->general.statistics_counter_id = cli;
}
cp->tx_cons_ptr =
@ -4698,7 +4718,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
(udev->l2_ring + (2 * BCM_PAGE_SIZE));
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
int port = CNIC_PORT(cp);
u32 cli = cp->ethdev->iscsi_l2_client_id;
int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
u32 val;
@ -4706,10 +4725,10 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
/* General data */
data->general.client_id = cli;
data->general.statistics_en_flg = 1;
data->general.statistics_counter_id = cli;
data->general.activate_flg = 1;
data->general.sp_client_id = cli;
data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
data->general.func_id = cp->pfid;
for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
@ -4743,23 +4762,12 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
data->rx.status_block_id = BNX2X_DEF_SB_ID;
data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
data->rx.outer_vlan_removal_enable_flg = 1;
/* reset tstorm and ustorm per client statistics */
if (cli < MAX_STAT_COUNTER_ID) {
val = BAR_TSTRORM_INTMEM +
TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0);
val = BAR_USTRORM_INTMEM +
USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0);
}
data->rx.silent_vlan_removal_flg = 1;
data->rx.silent_vlan_value = 0;
data->rx.silent_vlan_mask = 0xffff;
cp->rx_cons_ptr =
&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
@ -4775,7 +4783,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
cp->kcq1.sw_prod_idx = 0;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq1.hw_prod_idx_ptr =
@ -4791,7 +4799,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
&sb->sb.running_index[SM_RX_ID];
}
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
@ -4808,10 +4816,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int func = CNIC_FUNC(cp), ret, i;
int func = CNIC_FUNC(cp), ret;
u32 pfid;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
cp->port_mode = CHIP_PORT_MODE_NONE;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
if (!(val & 1))
@ -4819,10 +4829,13 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
else
val = (val >> 1) & 1;
if (val)
if (val) {
cp->port_mode = CHIP_4_PORT_MODE;
cp->pfid = func >> 1;
else
} else {
cp->port_mode = CHIP_4_PORT_MODE;
cp->pfid = func & 0x6;
}
} else {
cp->pfid = func;
}
@ -4834,7 +4847,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
if (ret)
return -ENOMEM;
if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
BNX2X_FCOE_NUM_CONNECTIONS,
cp->fcoe_start_cid, 0);
@ -4871,15 +4884,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
HC_INDEX_ISCSI_EQ_CONS);
for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
cp->conn_buf_info.pgtbl[2 * i]);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
cp->conn_buf_info.pgtbl[(2 * i) + 1]);
}
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
@ -4927,7 +4931,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
off = BAR_USTRORM_INTMEM +
(BNX2X_CHIP_IS_E2(cp->chip_id) ?
(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
@ -5277,7 +5281,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
!(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
@ -5293,7 +5297,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cp->stop_cm = cnic_cm_stop_bnx2x_hw;
cp->enable_int = cnic_enable_bnx2x_int;
cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
if (BNX2X_CHIP_IS_E2(cp->chip_id))
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
cp->ack_int = cnic_ack_bnx2x_e2_msix;
else
cp->ack_int = cnic_ack_bnx2x_msix;

Просмотреть файл

@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2010 Broadcom Corporation
* Copyright (c) 2006-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -68,11 +68,6 @@
#define BNX2_PG_CTX_MAP 0x1a0034
#define BNX2_ISCSI_CTX_MAP 0x1a0074
struct cnic_redirect_entry {
struct dst_entry *old_dst;
struct dst_entry *new_dst;
};
#define MAX_COMPLETED_KCQE 64
#define MAX_CNIC_L5_CONTEXT 256
@ -171,6 +166,7 @@ struct cnic_context {
unsigned long ctx_flags;
#define CTX_FL_OFFLD_START 0
#define CTX_FL_DELETE_WAIT 1
#define CTX_FL_CID_ERROR 2
u8 ulp_proto_id;
union {
struct cnic_iscsi *iscsi;
@ -245,7 +241,7 @@ struct cnic_local {
u16 rx_cons;
u16 tx_cons;
struct iro *iro_arr;
const struct iro *iro_arr;
#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
struct cnic_dma kwq_info;
@ -286,7 +282,6 @@ struct cnic_local {
struct cnic_sock *csk_tbl;
struct cnic_id_tbl csk_port_tbl;
struct cnic_dma conn_buf_info;
struct cnic_dma gbl_buf_info;
struct cnic_iscsi *iscsi_tbl;
@ -320,6 +315,11 @@ struct cnic_local {
u32 chip_id;
int func;
u32 pfid;
u8 port_mode;
#define CHIP_4_PORT_MODE 0
#define CHIP_2_PORT_MODE 1
#define CHIP_PORT_MODE_NONE 2
u32 shmem_base;
struct cnic_ops *cnic_ops;
@ -369,7 +369,6 @@ struct bnx2x_bd_chain_next {
#define BNX2X_ISCSI_MAX_PENDING_R2TS 4
#define BNX2X_ISCSI_R2TQE_SIZE 8
#define BNX2X_ISCSI_HQ_BD_SIZE 64
#define BNX2X_ISCSI_CONN_BUF_SIZE 64
#define BNX2X_ISCSI_GLB_BUF_SIZE 64
#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
@ -406,6 +405,7 @@ struct bnx2x_bd_chain_next {
#define BNX2X_CHIP_IS_E2(x) \
(BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
#define BNX2X_CHIP_IS_E2_PLUS(x) BNX2X_CHIP_IS_E2(x)
#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
@ -442,8 +442,8 @@ struct bnx2x_bd_chain_next {
#define CNIC_PORT(cp) ((cp)->pfid & 1)
#define CNIC_FUNC(cp) ((cp)->func)
#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\
(CNIC_FUNC(cp) & 1))
#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
0 : (CNIC_FUNC(cp) & 1))
#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
@ -452,10 +452,15 @@ struct bnx2x_bd_chain_next {
#define BNX2X_SW_CID(x) (x & 0x1ffff)
#define BNX2X_CL_QZONE_ID(cp, cli) \
(cli + (CNIC_PORT(cp) * (BNX2X_CHIP_IS_E2(cp->chip_id) ?\
ETH_MAX_RX_CLIENTS_E2 : \
ETH_MAX_RX_CLIENTS_E1H)))
(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \
cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
#ifndef MAX_STAT_COUNTER_ID
#define MAX_STAT_COUNTER_ID \
(BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \
((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
MAX_STAT_COUNTER_ID_E1))
#endif
#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK (0<<4)
#endif

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
#define CNIC_MODULE_VERSION "2.2.14"
#define CNIC_MODULE_RELDATE "Mar 30, 2011"
#define CNIC_MODULE_VERSION "2.5.3"
#define CNIC_MODULE_RELDATE "June 6, 2011"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@ -99,6 +99,8 @@ struct kcqe {
struct cnic_ctl_completion {
u32 cid;
u8 opcode;
u8 error;
};
struct cnic_ctl_info {
@ -169,7 +171,7 @@ struct cnic_eth_dev {
struct pci_dev *pdev;
void __iomem *io_base;
void __iomem *io_base2;
void *iro_arr;
const void *iro_arr;
u32 ctx_tbl_offset;
u32 ctx_tbl_len;

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -62,7 +62,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
#define BNX2FC_VERSION "1.0.1"
#define BNX2FC_VERSION "1.0.3"
#define PFX "bnx2fc: "
@ -262,9 +262,14 @@ struct bnx2fc_rport {
#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8
#define BNX2FC_FLAG_EXPL_LOGO 0x9
u8 src_addr[ETH_ALEN];
u32 max_sqes;
u32 max_rqes;
u32 max_cqes;
atomic_t free_sqes;
struct b577xx_doorbell_set_prod sq_db;
struct b577xx_fcoe_rx_doorbell rx_db;
struct fcoe_sqe *sq;
dma_addr_t sq_dma;
@ -274,7 +279,7 @@ struct bnx2fc_rport {
struct fcoe_cqe *cq;
dma_addr_t cq_dma;
u32 cq_cons_idx;
u16 cq_cons_idx;
u8 cq_curr_toggle_bit;
u32 cq_mem_size;
@ -505,6 +510,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
struct fc_frame *,
void *),
void *arg, u32 timeout);
void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,

Просмотреть файл

@ -5,6 +5,12 @@
* This file defines HSI constants for the FCoE flows
*/
/* Current FCoE HSI version number composed of two fields (16 bit) */
/* Implies on a change broken previous HSI */
#define FCOE_HSI_MAJOR_VERSION (1)
/* Implies on a change which does not broken previous HSI */
#define FCOE_HSI_MINOR_VERSION (1)
/* KWQ/KCQ FCoE layer code */
#define FCOE_KWQE_LAYER_CODE (7)
@ -40,21 +46,62 @@
#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
#define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION (0x6)
/* CQE type */
#define FCOE_PENDING_CQE_TYPE 0
#define FCOE_UNSOLIC_CQE_TYPE 1
/* Unsolicited CQE type */
#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0
#define FCOE_ERROR_DETECTION_CQE_TYPE 1
#define FCOE_WARNING_DETECTION_CQE_TYPE 2
/* E_D_TOV timer resolution in ms */
#define FCOE_E_D_TOV_TIMER_RESOLUTION_MS (20)
/* E_D_TOV timer resolution for SDM (4 micro) */
#define FCOE_E_D_TOV_SDM_TIMER_RESOLUTION \
(FCOE_E_D_TOV_TIMER_RESOLUTION_MS * 1000 / 4)
/* REC timer resolution in ms */
#define FCOE_REC_TIMER_RESOLUTION_MS (20)
/* REC timer resolution for SDM (4 micro) */
#define FCOE_REC_SDM_TIMER_RESOLUTION (FCOE_REC_TIMER_RESOLUTION_MS * 1000 / 4)
/* E_D_TOV timer default wraparound value (2 sec) in 20 ms resolution */
#define FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL \
(2000 / FCOE_E_D_TOV_TIMER_RESOLUTION_MS)
/* REC_TOV timer default wraparound value (3 sec) in 20 ms resolution */
#define FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL \
(3000 / FCOE_REC_TIMER_RESOLUTION_MS)
#define FCOE_NUM_OF_TIMER_TASKS (8 * 1024)
#define FCOE_NUM_OF_CACHED_TASKS_TIMER (8)
/* Task context constants */
/******** Remove FCP_CMD write tce sleep ***********************/
/* In case timer services are required then shall be updated by Xstorm after
* start processing the task. In case no timer facilities are required then the
* driver would initialize the state to this value
*
#define FCOE_TASK_TX_STATE_NORMAL 0
* After driver has initialize the task in case timer services required *
#define FCOE_TASK_TX_STATE_INIT 1
******** Remove FCP_CMD write tce sleep ***********************/
/* After driver has initialize the task in case timer services required */
#define FCOE_TASK_TX_STATE_INIT 0
/* In case timer services are required then shall be updated by Xstorm after
* start processing the task. In case no timer facilities are required then the
* driver would initialize the state to this value */
* driver would initialize the state to this value
*/
#define FCOE_TASK_TX_STATE_NORMAL 1
/* Task is under abort procedure. Updated in order to stop processing of
* pending WQEs on this task */
* pending WQEs on this task
*/
#define FCOE_TASK_TX_STATE_ABORT 2
/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
#define FCOE_TASK_TX_STATE_ERROR 3
@ -66,17 +113,8 @@
#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6
/* For sequence cleanup request task */
#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7
/* Mark task as aborted and indicate that ABTS was not transmitted */
#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8
/* Mark task as aborted and indicate that ABTS was transmitted */
#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9
/* For completion the ABTS task. */
#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10
/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
*/
#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11
/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12
#define FCOE_TASK_TX_STATE_ABTS_TX 8
#define FCOE_TASK_RX_STATE_NORMAL 0
#define FCOE_TASK_RX_STATE_COMPLETED 1
@ -86,25 +124,25 @@
#define FCOE_TASK_RX_STATE_WARNING 3
/* For E_D_T_TOV timer expiration in Ustorm */
#define FCOE_TASK_RX_STATE_ERROR 4
/* ABTS ACC arrived wait for local completion to finally complete the task. */
#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5
/* local completion arrived wait for ABTS ACC to finally complete the task. */
#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6
/* FW only: First visit at rx-path, part of the abts round trip */
#define FCOE_TASK_RX_STATE_ABTS_IN_PROCESS 5
/* FW only: Second visit at rx-path, after ABTS frame transmitted */
#define FCOE_TASK_RX_STATE_ABTS_TRANSMITTED 6
/* Special completion indication in case of task was aborted. */
#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7
/* Special completion indication in case of task was cleaned. */
#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8
/* Special completion indication (in task requested the exchange cleanup) in
* case cleaned task is in non-valid. */
#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9
/* FW only: First visit at rx-path, part of the cleanup round trip */
#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_IN_PROCESS 8
/* FW only: Special completion indication in case of task was cleaned. */
#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 9
/* Not in used: Special completion indication (in task requested the exchange
* cleanup) in case cleaned task is in non-valid.
*/
#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 10
/* Special completion indication (in task requested the sequence cleanup) in
* case cleaned task was already returned to normal. */
#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10
/* Exchange cleanup arrived wait until xfer will be handled to finally
* complete the task. */
#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11
/* Xfer handled, wait for exchange cleanup to finally complete the task. */
#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12
* case cleaned task was already returned to normal.
*/
#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 11
#define FCOE_TASK_TYPE_WRITE 0
#define FCOE_TASK_TYPE_READ 1
@ -120,11 +158,40 @@
#define FCOE_TASK_CLASS_TYPE_3 0
#define FCOE_TASK_CLASS_TYPE_2 1
/* FCoE/FC packet fields */
#define FCOE_ETH_TYPE 0x8906
/* FCoE maximum elements in hash table */
#define FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW 8
/* FCoE half of the elements in hash table */
#define FCOE_HALF_ELEMENTS_IN_HASH_TABLE_ROW \
(FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW / 2)
/* FcoE number of cached T2 entries */
#define T_FCOE_NUMBER_OF_CACHED_T2_ENTRIES (4)
/* FCoE maximum elements in hash table */
#define FCOE_HASH_TBL_CHUNK_SIZE 16384
/* Everest FCoE connection type */
#define B577XX_FCOE_CONNECTION_TYPE 4
/* Error codes for Error Reporting in fast path flows */
/* XFER error codes */
/* FCoE number of rows (in log). This number derives
* from the maximum connections supported which is 2048.
* TBA: Need a different constant for E2
*/
#define FCOE_MAX_NUM_SESSIONS_LOG 11
#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
/* Error codes for Error Reporting in slow path flows */
#define FCOE_SLOW_PATH_ERROR_CODE_TOO_MANY_FUNCS 0
#define FCOE_SLOW_PATH_ERROR_CODE_NO_LICENSE 1
/* Error codes for Error Reporting in fast path flows
* XFER error codes
*/
#define FCOE_ERROR_CODE_XFER_OOO_RO 0
#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1
#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2
@ -155,17 +222,17 @@
#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23
#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24
#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25
#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
#define FCOE_ERROR_CODE_DATA_FCTL 28
/* Middle path error codes */
#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29
#define FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE 29
#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30
#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31
#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32
#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33
#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34
#define FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL 34
#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35
#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36
@ -173,7 +240,7 @@
#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37
#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38
#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39
#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41
/* Common error codes */
@ -185,7 +252,7 @@
#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47
#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48
#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49
#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
/* Unsolicited Rx error codes */
#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51

Просмотреть файл

@ -83,7 +83,7 @@ int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
rrq.rrq_cmd = ELS_RRQ;
hton24(rrq.rrq_s_id, sid);
rrq.rrq_ox_id = htons(aborted_io_req->xid);
rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);
rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);
retry_rrq:
rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
@ -417,12 +417,13 @@ void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
hdr = (u64 *)fc_hdr;
temp_hdr = (u64 *)
&task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
hdr[0] = cpu_to_be64(temp_hdr[0]);
hdr[1] = cpu_to_be64(temp_hdr[1]);
hdr[2] = cpu_to_be64(temp_hdr[2]);
mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
mp_req->resp_len =
task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
/* Parse ELS response */
if ((els_req->cb_func) && (els_req->cb_arg)) {

Просмотреть файл

@ -21,7 +21,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
#define DRV_MODULE_RELDATE "Mar 17, 2011"
#define DRV_MODULE_RELDATE "Jun 10, 2011"
static char version[] __devinitdata =
@ -612,7 +612,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
return bnx2fc_stats;
}
bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;

Просмотреть файл

@ -100,6 +100,9 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
((u64) hba->hash_tbl_pbl_dma >> 32);
@ -122,6 +125,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
fcoe_init3.error_bit_map_lo = 0xffffffff;
fcoe_init3.error_bit_map_hi = 0xffffffff;
fcoe_init3.perf_config = 1;
kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
@ -289,19 +293,19 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
/* local mac */
ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
ofld_req4.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
ofld_req4.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
ofld_req4.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
ofld_req4.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
ofld_req4.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
ofld_req4.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@ -345,20 +349,21 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
enbl_req.hdr.flags =
(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
/* local mac */
enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
enbl_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
enbl_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
enbl_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
enbl_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
enbl_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
enbl_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
port_id = fc_host_port_id(lport->host);
if (port_id != tgt->sid) {
@ -411,18 +416,19 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
disable_req.hdr.flags =
(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
disable_req.dst_mac_addr_lo[0] = hba->ctlr.dest_addr[5];/* fcf mac */
disable_req.dst_mac_addr_lo[1] = hba->ctlr.dest_addr[4];
disable_req.dst_mac_addr_mid[0] = hba->ctlr.dest_addr[3];
disable_req.dst_mac_addr_mid[1] = hba->ctlr.dest_addr[2];
disable_req.dst_mac_addr_hi[0] = hba->ctlr.dest_addr[1];
disable_req.dst_mac_addr_hi[1] = hba->ctlr.dest_addr[0];
port_id = tgt->sid;
disable_req.s_id[0] = (port_id & 0x000000FF);
@ -640,10 +646,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
xid = err_entry->fc_hdr.ox_id;
BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
err_entry->err_warn_bitmap_hi,
err_entry->err_warn_bitmap_lo);
err_entry->data.err_warn_bitmap_hi,
err_entry->data.err_warn_bitmap_lo);
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
err_entry->tx_buf_off, err_entry->rx_buf_off);
err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
bnx2fc_return_rqe(tgt, 1);
@ -722,10 +728,10 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
err_entry->err_warn_bitmap_hi,
err_entry->err_warn_bitmap_lo);
err_entry->data.err_warn_bitmap_hi,
err_entry->data.err_warn_bitmap_lo);
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
err_entry->tx_buf_off, err_entry->rx_buf_off);
err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
bnx2fc_return_rqe(tgt, 1);
spin_unlock_bh(&tgt->tgt_lock);
@ -762,9 +768,9 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
task = &(task_page[index]);
num_rq = ((task->rx_wr_tx_rd.rx_flags &
FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
@ -777,22 +783,19 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
/* Timestamp IO completion time */
cmd_type = io_req->cmd_type;
/* optimized completion path */
if (cmd_type == BNX2FC_SCSI_CMD) {
rx_state = ((task->rx_wr_tx_rd.rx_flags &
FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
/* Process other IO completion types */
switch (cmd_type) {
case BNX2FC_SCSI_CMD:
if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
spin_unlock_bh(&tgt->tgt_lock);
return;
}
}
/* Process other IO completion types */
switch (cmd_type) {
case BNX2FC_SCSI_CMD:
if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
bnx2fc_process_abts_compl(io_req, task, num_rq);
else if (rx_state ==
@ -819,8 +822,16 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
break;
case BNX2FC_ELS:
BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
bnx2fc_process_els_compl(io_req, task, num_rq);
if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
bnx2fc_process_els_compl(io_req, task, num_rq);
else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
bnx2fc_process_abts_compl(io_req, task, num_rq);
else if (rx_state ==
FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
bnx2fc_process_cleanup_compl(io_req, task, num_rq);
else
printk(KERN_ERR PFX "Invalid rx state = %d\n",
rx_state);
break;
case BNX2FC_CLEANUP:
@ -835,6 +846,20 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
spin_unlock_bh(&tgt->tgt_lock);
}
void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
u32 msg;
wmb();
rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
FCOE_CQE_TOGGLE_BIT_SHIFT);
msg = *((u32 *)rx_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
mmiowb();
}
struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
struct bnx2fc_work *work;
@ -853,8 +878,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
struct fcoe_cqe *cq;
u32 cq_cons;
struct fcoe_cqe *cqe;
u32 num_free_sqes = 0;
u16 wqe;
bool more_cqes_found = false;
/*
* cq_lock is a low contention lock used to protect
@ -872,62 +897,51 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
cq_cons = tgt->cq_cons_idx;
cqe = &cq[cq_cons];
do {
more_cqes_found ^= true;
while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
(tgt->cq_curr_toggle_bit <<
FCOE_CQE_TOGGLE_BIT_SHIFT)) {
while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
(tgt->cq_curr_toggle_bit <<
FCOE_CQE_TOGGLE_BIT_SHIFT)) {
/* new entry on the cq */
if (wqe & FCOE_CQE_CQE_TYPE) {
/* Unsolicited event notification */
bnx2fc_process_unsol_compl(tgt, wqe);
} else {
/* Pending work request completion */
struct bnx2fc_work *work = NULL;
struct bnx2fc_percpu_s *fps = NULL;
unsigned int cpu = wqe % num_possible_cpus();
/* new entry on the cq */
if (wqe & FCOE_CQE_CQE_TYPE) {
/* Unsolicited event notification */
bnx2fc_process_unsol_compl(tgt, wqe);
} else {
struct bnx2fc_work *work = NULL;
struct bnx2fc_percpu_s *fps = NULL;
unsigned int cpu = wqe % num_possible_cpus();
fps = &per_cpu(bnx2fc_percpu, cpu);
spin_lock_bh(&fps->fp_work_lock);
if (unlikely(!fps->iothread))
goto unlock;
fps = &per_cpu(bnx2fc_percpu, cpu);
spin_lock_bh(&fps->fp_work_lock);
if (unlikely(!fps->iothread))
goto unlock;
work = bnx2fc_alloc_work(tgt, wqe);
if (work)
list_add_tail(&work->list,
&fps->work_list);
work = bnx2fc_alloc_work(tgt, wqe);
if (work)
list_add_tail(&work->list,
&fps->work_list);
unlock:
spin_unlock_bh(&fps->fp_work_lock);
spin_unlock_bh(&fps->fp_work_lock);
/* Pending work request completion */
if (fps->iothread && work)
wake_up_process(fps->iothread);
else
bnx2fc_process_cq_compl(tgt, wqe);
}
cqe++;
tgt->cq_cons_idx++;
if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
tgt->cq_cons_idx = 0;
cqe = cq;
tgt->cq_curr_toggle_bit =
1 - tgt->cq_curr_toggle_bit;
}
/* Pending work request completion */
if (fps->iothread && work)
wake_up_process(fps->iothread);
else
bnx2fc_process_cq_compl(tgt, wqe);
}
/* Re-arm CQ */
if (more_cqes_found) {
tgt->conn_db->cq_arm.lo = -1;
wmb();
cqe++;
tgt->cq_cons_idx++;
num_free_sqes++;
if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
tgt->cq_cons_idx = 0;
cqe = cq;
tgt->cq_curr_toggle_bit =
1 - tgt->cq_curr_toggle_bit;
}
} while (more_cqes_found);
/*
* Commit tgt->cq_cons_idx change to the memory
* spin_lock implies full memory barrier, no need to smp_wmb
*/
}
bnx2fc_arm_cq(tgt);
atomic_add(num_free_sqes, &tgt->free_sqes);
spin_unlock_bh(&tgt->cq_lock);
return 0;
}
@ -1141,7 +1155,11 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
printk(KERN_ERR PFX "init_failure due to NIC error\n");
break;
case FCOE_KCQE_COMPLETION_STATUS_ERROR:
printk(KERN_ERR PFX "init failure due to compl status err\n");
break;
case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
default:
printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
}
@ -1247,21 +1265,14 @@ void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
struct b577xx_doorbell_set_prod ev_doorbell;
struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
u32 msg;
wmb();
memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
ev_doorbell.prod = tgt->sq_prod_idx |
sq_db->prod = tgt->sq_prod_idx |
(tgt->sq_curr_toggle_bit << 15);
ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
msg = *((u32 *)&ev_doorbell);
msg = *((u32 *)sq_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
mmiowb();
}
@ -1322,18 +1333,26 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
/* Tx Write Rx Read */
task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
task->tx_wr_rx_rd.init_flags = task_type <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
/* Common */
task->cmn.common_flags = context_id <<
FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
task->cmn.general.cleanup_info.task_id = orig_xid;
/* init flags */
task->txwr_rxrd.const_ctx.init_flags = task_type <<
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
task->txwr_rxrd.const_ctx.init_flags |=
FCOE_TASK_DEV_TYPE_DISK <<
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
/* Tx flags */
task->txwr_rxrd.const_ctx.tx_flags =
FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
/* Rx Read Tx Write */
task->rxwr_txrd.const_ctx.init_flags = context_id <<
FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
@ -1342,6 +1361,7 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
struct bnx2fc_rport *tgt = io_req->tgt;
struct fc_frame_header *fc_hdr;
struct fcoe_ext_mul_sges_ctx *sgl;
u8 task_type = 0;
u64 *hdr;
u64 temp_hdr[3];
@ -1367,47 +1387,49 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
/* Tx only */
if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
(task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
(u32)mp_req->mp_req_bd_dma;
task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
(u32)((u64)mp_req->mp_req_bd_dma >> 32);
task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
(unsigned long long)mp_req->mp_req_bd_dma);
task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
}
/* Tx Write Rx Read */
task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
task->tx_wr_rx_rd.init_flags = task_type <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
/* init flags */
task->txwr_rxrd.const_ctx.init_flags = task_type <<
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
task->txwr_rxrd.const_ctx.init_flags |=
FCOE_TASK_DEV_TYPE_DISK <<
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
/* Common */
task->cmn.data_2_trns = io_req->data_xfer_len;
context_id = tgt->context_id;
task->cmn.common_flags = context_id <<
FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
task->cmn.common_flags |= 1 <<
FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
task->cmn.common_flags |= 1 <<
FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
/* tx flags */
task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
/* Rx Write Tx Read */
task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
/* rx flags */
task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
context_id = tgt->context_id;
task->rxwr_txrd.const_ctx.init_flags = context_id <<
FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
fc_hdr = &(mp_req->req_fc_hdr);
if (task_type == FCOE_TASK_TYPE_MIDPATH) {
fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
fc_hdr->fh_rx_id = htons(0xffff);
task->rx_wr_tx_rd.rx_id = 0xffff;
task->rxwr_txrd.var_ctx.rx_id = 0xffff;
} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
}
/* Fill FC Header into middle path buffer */
hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
hdr[0] = cpu_to_be64(temp_hdr[0]);
hdr[1] = cpu_to_be64(temp_hdr[1]);
@ -1415,12 +1437,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
/* Rx Only */
if (task_type == FCOE_TASK_TYPE_MIDPATH) {
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
(u32)mp_req->mp_resp_bd_dma;
task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
sgl->mul_sgl.cur_sge_addr.hi =
(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
sgl->mul_sgl.sgl_size = 1;
}
}
@ -1431,6 +1453,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct io_bdt *bd_tbl = io_req->bd_tbl;
struct bnx2fc_rport *tgt = io_req->tgt;
struct fcoe_cached_sge_ctx *cached_sge;
struct fcoe_ext_mul_sges_ctx *sgl;
u64 *fcp_cmnd;
u64 tmp_fcp_cmnd[4];
u32 context_id;
@ -1449,47 +1473,33 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
/* Tx only */
if (task_type == FCOE_TASK_TYPE_WRITE) {
task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
(u32)bd_tbl->bd_tbl_dma;
task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
bd_tbl->bd_valid;
}
/*Tx Write Rx Read */
/* Init state to NORMAL */
task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
task->tx_wr_rx_rd.init_flags = task_type <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
/* Common */
task->cmn.data_2_trns = io_req->data_xfer_len;
context_id = tgt->context_id;
task->cmn.common_flags = context_id <<
FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
task->cmn.common_flags |= 1 <<
FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
task->cmn.common_flags |= 1 <<
FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
/* Set initiative ownership */
task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
task->txwr_rxrd.const_ctx.init_flags = task_type <<
FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
task->txwr_rxrd.const_ctx.init_flags |=
FCOE_TASK_DEV_TYPE_DISK <<
FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
/* tx flags */
task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
/* Set initial seq counter */
task->cmn.tx_low_seq_cnt = 1;
/* Set state to "waiting for the first packet" */
task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
/* Fill FCP_CMND IU */
fcp_cmnd = (u64 *)
task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
/* swap fcp_cmnd */
@ -1501,32 +1511,54 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
}
/* Rx Write Tx Read */
task->rx_wr_tx_rd.rx_id = 0xffff;
task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
context_id = tgt->context_id;
task->rxwr_txrd.const_ctx.init_flags = context_id <<
FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
/* rx flags */
/* Set state to "waiting for the first packet" */
task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
task->rxwr_txrd.var_ctx.rx_id = 0xffff;
/* Rx Only */
cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
bd_count = bd_tbl->bd_valid;
if (task_type == FCOE_TASK_TYPE_READ) {
bd_count = bd_tbl->bd_valid;
if (bd_count == 1) {
struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
fcoe_bd_tbl->buf_addr_lo;
task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
fcoe_bd_tbl->buf_addr_hi;
task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
fcoe_bd_tbl->buf_len;
task->tx_wr_rx_rd.init_flags |= 1 <<
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
task->txwr_rxrd.const_ctx.init_flags |= 1 <<
FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
} else if (bd_count == 2) {
struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
fcoe_bd_tbl++;
cached_sge->second_buf_addr.lo =
fcoe_bd_tbl->buf_addr_lo;
cached_sge->second_buf_addr.hi =
fcoe_bd_tbl->buf_addr_hi;
cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
task->txwr_rxrd.const_ctx.init_flags |= 1 <<
FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
} else {
task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
(u32)bd_tbl->bd_tbl_dma;
task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
sgl->mul_sgl.cur_sge_addr.hi =
(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
bd_tbl->bd_valid;
sgl->mul_sgl.sgl_size = bd_count;
}
}
}

Просмотреть файл

@ -425,6 +425,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
struct list_head *listp;
struct io_bdt *bd_tbl;
int index = RESERVE_FREE_LIST_INDEX;
u32 free_sqes;
u32 max_sqes;
u16 xid;
@ -445,8 +446,10 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
* cmgr lock
*/
spin_lock_bh(&cmd_mgr->free_list_lock[index]);
free_sqes = atomic_read(&tgt->free_sqes);
if ((list_empty(&(cmd_mgr->free_list[index]))) ||
(tgt->num_active_ios.counter >= max_sqes)) {
(tgt->num_active_ios.counter >= max_sqes) ||
(free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
"ios(%d):sqes(%d)\n",
tgt->num_active_ios.counter, tgt->max_sqes);
@ -463,6 +466,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
xid = io_req->xid;
cmd_mgr->cmds[xid] = io_req;
atomic_inc(&tgt->num_active_ios);
atomic_dec(&tgt->free_sqes);
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
INIT_LIST_HEAD(&io_req->link);
@ -489,6 +493,7 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
struct bnx2fc_cmd *io_req;
struct list_head *listp;
struct io_bdt *bd_tbl;
u32 free_sqes;
u32 max_sqes;
u16 xid;
int index = get_cpu();
@ -499,8 +504,10 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
* cmgr lock
*/
spin_lock_bh(&cmd_mgr->free_list_lock[index]);
free_sqes = atomic_read(&tgt->free_sqes);
if ((list_empty(&cmd_mgr->free_list[index])) ||
(tgt->num_active_ios.counter >= max_sqes)) {
(tgt->num_active_ios.counter >= max_sqes) ||
(free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
put_cpu();
return NULL;
@ -513,6 +520,7 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
xid = io_req->xid;
cmd_mgr->cmds[xid] = io_req;
atomic_inc(&tgt->num_active_ios);
atomic_dec(&tgt->free_sqes);
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
put_cpu();
@ -873,7 +881,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
/* Obtain oxid and rxid for the original exchange to be aborted */
fc_hdr->fh_ox_id = htons(io_req->xid);
fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);
sid = tgt->sid;
did = rport->port_id;
@ -1189,7 +1197,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;
switch (r_ctl) {
case FC_RCTL_BA_ACC:
@ -1344,12 +1352,13 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
fc_hdr = &(tm_req->resp_fc_hdr);
hdr = (u64 *)fc_hdr;
temp_hdr = (u64 *)
&task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
hdr[0] = cpu_to_be64(temp_hdr[0]);
hdr[1] = cpu_to_be64(temp_hdr[1]);
hdr[2] = cpu_to_be64(temp_hdr[2]);
tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
tm_req->resp_len =
task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
rsp_buf = tm_req->resp_buf;
@ -1724,7 +1733,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* Fetch fcp_rsp from task context and perform cmd completion */
fcp_rsp = (struct fcoe_fcp_rsp_payload *)
&(task->cmn.general.rsp_info.fcp_rsp.payload);
&(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
/* parse fcp_rsp and obtain sense data from RQ if available */
bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

Просмотреть файл

@ -133,6 +133,8 @@ retry_ofld:
/* upload will take care of cleaning up sess resc */
lport->tt.rport_logoff(rdata);
}
/* Arm CQ */
bnx2fc_arm_cq(tgt);
return;
ofld_err:
@ -315,6 +317,8 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
struct fc_rport *rport = rdata->rport;
struct bnx2fc_hba *hba = port->priv;
struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
tgt->rport = rport;
tgt->rdata = rdata;
@ -335,6 +339,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);
/* Initialize the toggle bit */
tgt->sq_curr_toggle_bit = 1;
@ -345,7 +350,17 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt->rq_cons_idx = 0;
atomic_set(&tgt->num_active_ios, 0);
tgt->work_time_slice = 2;
/* initialize sq doorbell */
sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
/* initialize rx doorbell */
rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) |
(0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) |
(B577XX_FCOE_CONNECTION_TYPE <<
B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT));
rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) |
(0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT);
spin_lock_init(&tgt->tgt_lock);
spin_lock_init(&tgt->cq_lock);
@ -758,8 +773,6 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
}
memset(tgt->lcq, 0, tgt->lcq_mem_size);
/* Arm CQ */
tgt->conn_db->cq_arm.lo = -1;
tgt->conn_db->rq_prod = 0x8000;
return 0;
@ -787,6 +800,8 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
iounmap(tgt->ctx_base);
tgt->ctx_base = NULL;
}
spin_lock_bh(&tgt->cq_lock);
/* Free LCQ */
if (tgt->lcq) {
dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
@ -828,17 +843,16 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
tgt->rq = NULL;
}
/* Free CQ */
spin_lock_bh(&tgt->cq_lock);
if (tgt->cq) {
dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
tgt->cq, tgt->cq_dma);
tgt->cq = NULL;
}
spin_unlock_bh(&tgt->cq_lock);
/* Free SQ */
if (tgt->sq) {
dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
tgt->sq, tgt->sq_dma);
tgt->sq = NULL;
}
spin_unlock_bh(&tgt->cq_lock);
}

Просмотреть файл

@ -707,8 +707,10 @@ struct iscsi_kwqe_conn_update {
#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4)
#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6)
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6
#elif defined(__LITTLE_ENDIAN)
u8 conn_flags;
#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
@ -719,8 +721,10 @@ struct iscsi_kwqe_conn_update {
#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4)
#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6)
#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6
u8 reserved2;
u8 max_outstanding_r2ts;
u8 session_error_recovery_level;