bnx2x, cnic, bnx2i: use new FW/HSI

This is the new FW HSI blob and the relevant definitions without logic changes.
It also includes code adaptation for the new HSI. New features are not enabled.

New FW/HSI includes:
- Support for 57712 HW
- Future support for VF (not used)
- Improvements in FW interrupts scheme
- FW FCoE hooks (stubs for future usage)

Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Dmitry Kravkov 2010-10-06 03:23:26 +00:00 коммит произвёл David S. Miller
Родитель 0c5b77152e
Коммит 523224a3b3
19 изменённых файлов: 4804 добавлений и 3262 удалений

Просмотреть файл

@ -33,13 +33,11 @@
#define BNX2X_NEW_NAPI #define BNX2X_NEW_NAPI
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1 #define BCM_CNIC 1
#include "../cnic_if.h" #include "../cnic_if.h"
#endif #endif
#ifdef BCM_CNIC #ifdef BCM_CNIC
#define BNX2X_MIN_MSIX_VEC_CNT 3 #define BNX2X_MIN_MSIX_VEC_CNT 3
#define BNX2X_MSIX_VEC_FP_START 2 #define BNX2X_MSIX_VEC_FP_START 2
@ -129,16 +127,18 @@ void bnx2x_panic_dump(struct bnx2x *bp);
} while (0) } while (0)
#endif #endif
#define bnx2x_mc_addr(ha) ((ha)->addr)
#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) #define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
#define U64_HI(x) (u32)(((u64)(x)) >> 32) #define U64_HI(x) (u32)(((u64)(x)) >> 32)
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
#define REG_ADDR(bp, offset) (bp->regview + offset) #define REG_ADDR(bp, offset) ((bp->regview) + (offset))
#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) #define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) #define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
@ -160,6 +160,9 @@ void bnx2x_panic_dump(struct bnx2x *bp);
offset, len32); \ offset, len32); \
} while (0) } while (0)
#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
REG_WR_DMAE(bp, offset, valp, len32)
#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \ #define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
do { \ do { \
memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
@ -175,16 +178,52 @@ void bnx2x_panic_dump(struct bnx2x *bp);
offsetof(struct shmem2_region, field)) offsetof(struct shmem2_region, field))
#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) #define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) #define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
offsetof(struct mf_cfg, field))
#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field) #define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val) #define MF_CFG_WR(bp, field, val) REG_WR(bp,\
MF_CFG_ADDR(bp, field), (val))
#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) #define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) #define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
/* SP SB indices */
/* General SP events - stats query, cfc delete, etc */
#define HC_SP_INDEX_ETH_DEF_CONS 3
/* EQ completions */
#define HC_SP_INDEX_EQ_CONS 7
/* iSCSI L2 */
#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
/**
* CIDs and CLIDs:
* CLIDs below is a CLID for func 0, then the CLID for other
* functions will be calculated by the formula:
*
* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
*
*/
/* iSCSI L2 */
#define BNX2X_ISCSI_ETH_CL_ID 17
#define BNX2X_ISCSI_ETH_CID 17
/** Additional rings budgeting */
#ifdef BCM_CNIC
#define CNIC_CONTEXT_USE 1
#else
#define CNIC_CONTEXT_USE 0
#endif /* BCM_CNIC */
#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
#define SM_RX_ID 0
#define SM_TX_ID 1
/* fast path */ /* fast path */
@ -254,11 +293,21 @@ union db_prod {
#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) #define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
union host_hc_status_block {
/* pointer to fp status block e1x */
struct host_hc_status_block_e1x *e1x_sb;
};
struct bnx2x_fastpath { struct bnx2x_fastpath {
struct napi_struct napi; struct napi_struct napi;
struct host_status_block *status_blk; union host_hc_status_block status_blk;
/* chip independed shortcuts into sb structure */
__le16 *sb_index_values;
__le16 *sb_running_index;
/* chip independed shortcut into rx_prods_offset memory */
u32 ustorm_rx_prods_offset;
dma_addr_t status_blk_mapping; dma_addr_t status_blk_mapping;
struct sw_tx_bd *tx_buf_ring; struct sw_tx_bd *tx_buf_ring;
@ -288,10 +337,15 @@ struct bnx2x_fastpath {
#define BNX2X_FP_STATE_OPEN 0xa0000 #define BNX2X_FP_STATE_OPEN 0xa0000
#define BNX2X_FP_STATE_HALTING 0xb0000 #define BNX2X_FP_STATE_HALTING 0xb0000
#define BNX2X_FP_STATE_HALTED 0xc0000 #define BNX2X_FP_STATE_HALTED 0xc0000
#define BNX2X_FP_STATE_TERMINATING 0xd0000
#define BNX2X_FP_STATE_TERMINATED 0xe0000
u8 index; /* number in fp array */ u8 index; /* number in fp array */
u8 cl_id; /* eth client id */ u8 cl_id; /* eth client id */
u8 sb_id; /* status block number in HW */ u8 cl_qzone_id;
u8 fw_sb_id; /* status block number in FW */
u8 igu_sb_id; /* status block number in HW */
u32 cid;
union db_prod tx_db; union db_prod tx_db;
@ -301,8 +355,7 @@ struct bnx2x_fastpath {
u16 tx_bd_cons; u16 tx_bd_cons;
__le16 *tx_cons_sb; __le16 *tx_cons_sb;
__le16 fp_c_idx; __le16 fp_hc_idx;
__le16 fp_u_idx;
u16 rx_bd_prod; u16 rx_bd_prod;
u16 rx_bd_cons; u16 rx_bd_cons;
@ -312,7 +365,7 @@ struct bnx2x_fastpath {
/* The last maximal completed SGE */ /* The last maximal completed SGE */
u16 last_max_sge; u16 last_max_sge;
__le16 *rx_cons_sb; __le16 *rx_cons_sb;
__le16 *rx_bd_cons_sb;
unsigned long tx_pkt, unsigned long tx_pkt,
@ -356,6 +409,8 @@ struct bnx2x_fastpath {
#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) #define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
#define MAX_TX_BD (NUM_TX_BD - 1) #define MAX_TX_BD (NUM_TX_BD - 1)
#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) #define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
#define INIT_JUMBO_TX_RING_SIZE MAX_TX_AVAIL
#define INIT_TX_RING_SIZE MAX_TX_AVAIL
#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ #define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
(MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
#define TX_BD(x) ((x) & MAX_TX_BD) #define TX_BD(x) ((x) & MAX_TX_BD)
@ -370,6 +425,8 @@ struct bnx2x_fastpath {
#define MAX_RX_BD (NUM_RX_BD - 1) #define MAX_RX_BD (NUM_RX_BD - 1)
#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) #define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
#define MIN_RX_AVAIL 128 #define MIN_RX_AVAIL 128
#define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL
#define INIT_RX_RING_SIZE MAX_RX_AVAIL
#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ #define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
(MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
#define RX_BD(x) ((x) & MAX_RX_BD) #define RX_BD(x) ((x) & MAX_RX_BD)
@ -420,11 +477,12 @@ struct bnx2x_fastpath {
le32_to_cpu((bd)->addr_lo)) le32_to_cpu((bd)->addr_lo))
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
#define DPM_TRIGER_TYPE 0x40 #define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \ #define DOORBELL(bp, cid, val) \
do { \ do { \
writel((u32)(val), bp->doorbells + (BCM_PAGE_SIZE * (cid)) + \ writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
DPM_TRIGER_TYPE); \ DPM_TRIGER_TYPE); \
} while (0) } while (0)
@ -482,31 +540,15 @@ struct bnx2x_fastpath {
#define BNX2X_RX_SUM_FIX(cqe) \ #define BNX2X_RX_SUM_FIX(cqe) \
BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
#define U_SB_ETH_RX_CQ_INDEX 1
#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES) #define U_SB_ETH_RX_BD_INDEX 2
#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES) #define C_SB_ETH_TX_CQ_INDEX 5
#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS
#define U_SB_ETH_RX_BD_INDEX HC_INDEX_U_ETH_RX_BD_CONS
#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS
#define BNX2X_RX_SB_INDEX \ #define BNX2X_RX_SB_INDEX \
(&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX]) (&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX])
#define BNX2X_RX_SB_BD_INDEX \
(&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX])
#define BNX2X_RX_SB_INDEX_NUM \
(((U_SB_ETH_RX_CQ_INDEX << \
USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \
((U_SB_ETH_RX_BD_INDEX << \
USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \
USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER))
#define BNX2X_TX_SB_INDEX \ #define BNX2X_TX_SB_INDEX \
(&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX]) (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])
/* end of fast path */ /* end of fast path */
@ -553,10 +595,16 @@ struct bnx2x_common {
u32 shmem_base; u32 shmem_base;
u32 shmem2_base; u32 shmem2_base;
u32 mf_cfg_base;
u32 hw_config; u32 hw_config;
u32 bc_ver; u32 bc_ver;
u8 int_block;
#define INT_BLOCK_HC 0
u8 chip_port_mode;
#define CHIP_PORT_MODE_NONE 0x2
}; };
@ -590,27 +638,98 @@ struct bnx2x_port {
/* end of port */ /* end of port */
/* e1h Classification CAM line allocations */
enum {
CAM_ETH_LINE = 0,
CAM_ISCSI_ETH_LINE,
CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
};
#define BNX2X_VF_ID_INVALID 0xFF
#ifdef BCM_CNIC /*
#define MAX_CONTEXT 15 * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
#else * control by the number of fast-path status blocks supported by the
#define MAX_CONTEXT 16 * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
#endif * status block represents an independent interrupts context that can
* serve a regular L2 networking queue. However special L2 queues such
* as the FCoE queue do not require a FP-SB and other components like
* the CNIC may consume FP-SB reducing the number of possible L2 queues
*
* If the maximum number of FP-SB available is X then:
* a. If CNIC is supported it consumes 1 FP-SB thus the max number of
* regular L2 queues is Y=X-1
* b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
* c. If the FCoE L2 queue is supported the actual number of L2 queues
* is Y+1
* d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
* slow-path interrupts) or Y+2 if CNIC is supported (one additional
* FP interrupt context for the CNIC).
* e. The number of HW context (CID count) is always X or X+1 if FCoE
* L2 queue is supported. the cid for the FCoE L2 queue is always X.
*/
#define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */
#define MAX_CONTEXT FP_SB_MAX_E1x
/*
* cid_cnt paramter below refers to the value returned by
* 'bnx2x_get_l2_cid_count()' routine
*/
/*
* The number of FP context allocated by the driver == max number of regular
* L2 queues + 1 for the FCoE L2 queue
*/
#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
union cdu_context { union cdu_context {
struct eth_context eth; struct eth_context eth;
char pad[1024]; char pad[1024];
}; };
/* CDU host DB constants */
#define CDU_ILT_PAGE_SZ_HW 3
#define CDU_ILT_PAGE_SZ (4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */
#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
#ifdef BCM_CNIC
#define CNIC_ISCSI_CID_MAX 256
#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX)
#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
#endif
#define QM_ILT_PAGE_SZ_HW 3
#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 32K */
#define QM_CID_ROUND 1024
#ifdef BCM_CNIC
/* TM (timers) host DB constants */
#define TM_ILT_PAGE_SZ_HW 2
#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 16K */
/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
#define TM_CONN_NUM 1024
#define TM_ILT_SZ (8 * TM_CONN_NUM)
#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
/* SRC (Searcher) host DB constants */
#define SRC_ILT_PAGE_SZ_HW 3
#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */
#define SRC_HASH_BITS 10
#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
#define SRC_T2_SZ SRC_ILT_SZ
#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
#endif
#define MAX_DMAE_C 8 #define MAX_DMAE_C 8
/* DMA memory not used in fastpath */ /* DMA memory not used in fastpath */
struct bnx2x_slowpath { struct bnx2x_slowpath {
union cdu_context context[MAX_CONTEXT];
struct eth_stats_query fw_stats; struct eth_stats_query fw_stats;
struct mac_configuration_cmd mac_config; struct mac_configuration_cmd mac_config;
struct mac_configuration_cmd mcast_config; struct mac_configuration_cmd mcast_config;
struct client_init_ramrod_data client_init_data;
/* used by dmae command executer */ /* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C]; struct dmae_command dmae[MAX_DMAE_C];
@ -638,37 +757,71 @@ struct attn_route {
u32 sig[4]; u32 sig[4];
}; };
struct iro {
u32 base;
u16 m1;
u16 m2;
u16 m3;
u16 size;
};
struct hw_context {
union cdu_context *vcxt;
dma_addr_t cxt_mapping;
size_t size;
};
/* forward */
struct bnx2x_ilt;
typedef enum { typedef enum {
BNX2X_RECOVERY_DONE, BNX2X_RECOVERY_DONE,
BNX2X_RECOVERY_INIT, BNX2X_RECOVERY_INIT,
BNX2X_RECOVERY_WAIT, BNX2X_RECOVERY_WAIT,
} bnx2x_recovery_state_t; } bnx2x_recovery_state_t;
/**
* Event queue (EQ or event ring) MC hsi
* NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
*/
#define NUM_EQ_PAGES 1
#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \
(EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
#define BNX2X_EQ_INDEX \
(&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_EQ_CONS])
struct bnx2x { struct bnx2x {
/* Fields used in the tx and intr/napi performance paths /* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure * are grouped together in the beginning of the structure
*/ */
struct bnx2x_fastpath fp[MAX_CONTEXT]; struct bnx2x_fastpath *fp;
void __iomem *regview; void __iomem *regview;
void __iomem *doorbells; void __iomem *doorbells;
#ifdef BCM_CNIC u16 db_size;
#define BNX2X_DB_SIZE (18*BCM_PAGE_SIZE)
#else
#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
#endif
struct net_device *dev; struct net_device *dev;
struct pci_dev *pdev; struct pci_dev *pdev;
struct iro *iro_arr;
#define IRO (bp->iro_arr)
atomic_t intr_sem; atomic_t intr_sem;
bnx2x_recovery_state_t recovery_state; bnx2x_recovery_state_t recovery_state;
int is_leader; int is_leader;
#ifdef BCM_CNIC struct msix_entry *msix_table;
struct msix_entry msix_table[MAX_CONTEXT+2];
#else
struct msix_entry msix_table[MAX_CONTEXT+1];
#endif
#define INT_MODE_INTx 1 #define INT_MODE_INTx 1
#define INT_MODE_MSI 2 #define INT_MODE_MSI 2
@ -680,7 +833,8 @@ struct bnx2x {
u32 rx_csum; u32 rx_csum;
u32 rx_buf_size; u32 rx_buf_size;
#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */ /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60 #define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500 #define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600 #define ETH_MAX_JUMBO_PACKET_SIZE 9600
@ -689,13 +843,12 @@ struct bnx2x {
#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \ #define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
L1_CACHE_SHIFT : 8) L1_CACHE_SHIFT : 8)
#define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT) #define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT)
#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
struct host_def_status_block *def_status_blk; struct host_sp_status_block *def_status_blk;
#define DEF_SB_ID 16 #define DEF_SB_IGU_ID 16
__le16 def_c_idx; #define DEF_SB_ID HC_SP_SB_ID
__le16 def_u_idx; __le16 def_idx;
__le16 def_x_idx;
__le16 def_t_idx;
__le16 def_att_idx; __le16 def_att_idx;
u32 attn_state; u32 attn_state;
struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
@ -711,6 +864,13 @@ struct bnx2x {
/* used to synchronize spq accesses */ /* used to synchronize spq accesses */
spinlock_t spq_lock; spinlock_t spq_lock;
/* event queue */
union event_ring_elem *eq_ring;
dma_addr_t eq_mapping;
u16 eq_prod;
u16 eq_cons;
__le16 *eq_cons_sb;
/* Flags for marking that there is a STAT_QUERY or /* Flags for marking that there is a STAT_QUERY or
SET_MAC ramrod pending */ SET_MAC ramrod pending */
int stats_pending; int stats_pending;
@ -737,6 +897,8 @@ struct bnx2x {
#define MF_FUNC_DIS 0x1000 #define MF_FUNC_DIS 0x1000
int func; int func;
int base_fw_ndsb;
#define BP_PORT(bp) (bp->func % PORT_MAX) #define BP_PORT(bp) (bp->func % PORT_MAX)
#define BP_FUNC(bp) (bp->func) #define BP_FUNC(bp) (bp->func)
#define BP_E1HVN(bp) (bp->func >> 1) #define BP_E1HVN(bp) (bp->func >> 1)
@ -801,6 +963,7 @@ struct bnx2x {
#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 #define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000 #define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
#define BNX2X_STATE_FUNC_STARTED 0x7000
#define BNX2X_STATE_DIAG 0xe000 #define BNX2X_STATE_DIAG 0xe000
#define BNX2X_STATE_ERROR 0xf000 #define BNX2X_STATE_ERROR 0xf000
@ -809,6 +972,15 @@ struct bnx2x {
int disable_tpa; int disable_tpa;
int int_mode; int int_mode;
struct tstorm_eth_mac_filter_config mac_filters;
#define BNX2X_ACCEPT_NONE 0x0000
#define BNX2X_ACCEPT_UNICAST 0x0001
#define BNX2X_ACCEPT_MULTICAST 0x0002
#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
#define BNX2X_ACCEPT_BROADCAST 0x0010
#define BNX2X_PROMISCUOUS_MODE 0x10000
u32 rx_mode; u32 rx_mode;
#define BNX2X_RX_MODE_NONE 0 #define BNX2X_RX_MODE_NONE 0
#define BNX2X_RX_MODE_NORMAL 1 #define BNX2X_RX_MODE_NORMAL 1
@ -817,12 +989,25 @@ struct bnx2x {
#define BNX2X_MAX_MULTICAST 64 #define BNX2X_MAX_MULTICAST 64
#define BNX2X_MAX_EMUL_MULTI 16 #define BNX2X_MAX_EMUL_MULTI 16
u32 rx_mode_cl_mask; u8 igu_dsb_id;
u8 igu_base_sb;
u8 igu_sb_cnt;
dma_addr_t def_status_blk_mapping; dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath; struct bnx2x_slowpath *slowpath;
dma_addr_t slowpath_mapping; dma_addr_t slowpath_mapping;
struct hw_context context;
struct bnx2x_ilt *ilt;
#define BP_ILT(bp) ((bp)->ilt)
#define ILT_MAX_LINES 128
int l2_cid_count;
#define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \
ILT_PAGE_CIDS))
#define BNX2X_DB_SIZE(bp) ((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT))
int qm_cid_count;
int dropless_fc; int dropless_fc;
@ -842,9 +1027,10 @@ struct bnx2x {
void *cnic_data; void *cnic_data;
u32 cnic_tag; u32 cnic_tag;
struct cnic_eth_dev cnic_eth_dev; struct cnic_eth_dev cnic_eth_dev;
struct host_status_block *cnic_sb; union host_hc_status_block cnic_sb;
dma_addr_t cnic_sb_mapping; dma_addr_t cnic_sb_mapping;
#define CNIC_SB_ID(bp) BP_L_ID(bp) #define CNIC_SB_ID(bp) ((bp)->base_fw_ndsb + BP_L_ID(bp))
#define CNIC_IGU_SB_ID(bp) ((bp)->igu_base_sb)
struct eth_spe *cnic_kwq; struct eth_spe *cnic_kwq;
struct eth_spe *cnic_kwq_prod; struct eth_spe *cnic_kwq_prod;
struct eth_spe *cnic_kwq_cons; struct eth_spe *cnic_kwq_cons;
@ -914,12 +1100,167 @@ struct bnx2x {
const struct firmware *firmware; const struct firmware *firmware;
}; };
/**
* Init queue/func interface
*/
/* queue init flags */
#define QUEUE_FLG_TPA 0x0001
#define QUEUE_FLG_CACHE_ALIGN 0x0002
#define QUEUE_FLG_STATS 0x0004
#define QUEUE_FLG_OV 0x0008
#define QUEUE_FLG_VLAN 0x0010
#define QUEUE_FLG_COS 0x0020
#define QUEUE_FLG_HC 0x0040
#define QUEUE_FLG_DHC 0x0080
#define QUEUE_FLG_OOO 0x0100
#define QUEUE_DROP_IP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR
#define QUEUE_DROP_TCP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR
#define QUEUE_DROP_TTL0 TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0
#define QUEUE_DROP_UDP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR
/* rss capabilities */
#define RSS_IPV4_CAP 0x0001
#define RSS_IPV4_TCP_CAP 0x0002
#define RSS_IPV6_CAP 0x0004
#define RSS_IPV6_TCP_CAP 0x0008
#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \ #define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
: MAX_CONTEXT) : MAX_CONTEXT)
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) #define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
#define RSS_IPV4_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
#define RSS_IPV4_TCP_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
#define RSS_IPV6_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
#define RSS_IPV6_TCP_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
/* func init flags */
#define FUNC_FLG_RSS 0x0001
#define FUNC_FLG_STATS 0x0002
/* removed FUNC_FLG_UNMATCHED 0x0004 */
#define FUNC_FLG_TPA 0x0008
#define FUNC_FLG_SPQ 0x0010
#define FUNC_FLG_LEADING 0x0020 /* PF only */
#define FUNC_CONFIG(flgs) ((flgs) & (FUNC_FLG_RSS | FUNC_FLG_TPA | \
FUNC_FLG_LEADING))
struct rxq_pause_params {
u16 bd_th_lo;
u16 bd_th_hi;
u16 rcq_th_lo;
u16 rcq_th_hi;
u16 sge_th_lo; /* valid iff QUEUE_FLG_TPA */
u16 sge_th_hi; /* valid iff QUEUE_FLG_TPA */
u16 pri_map;
};
struct bnx2x_rxq_init_params {
/* cxt*/
struct eth_context *cxt;
/* dma */
dma_addr_t dscr_map;
dma_addr_t sge_map;
dma_addr_t rcq_map;
dma_addr_t rcq_np_map;
u16 flags;
u16 drop_flags;
u16 mtu;
u16 buf_sz;
u16 fw_sb_id;
u16 cl_id;
u16 spcl_id;
u16 cl_qzone_id;
/* valid iff QUEUE_FLG_STATS */
u16 stat_id;
/* valid iff QUEUE_FLG_TPA */
u16 tpa_agg_sz;
u16 sge_buf_sz;
u16 max_sges_pkt;
/* valid iff QUEUE_FLG_CACHE_ALIGN */
u8 cache_line_log;
u8 sb_cq_index;
u32 cid;
/* desired interrupts per sec. valid iff QUEUE_FLG_HC */
u32 hc_rate;
};
struct bnx2x_txq_init_params {
/* cxt*/
struct eth_context *cxt;
/* dma */
dma_addr_t dscr_map;
u16 flags;
u16 fw_sb_id;
u8 sb_cq_index;
u8 cos; /* valid iff QUEUE_FLG_COS */
u16 stat_id; /* valid iff QUEUE_FLG_STATS */
u16 traffic_type;
u32 cid;
u16 hc_rate; /* desired interrupts per sec.*/
/* valid iff QUEUE_FLG_HC */
};
struct bnx2x_client_ramrod_params {
int *pstate;
int state;
u16 index;
u16 cl_id;
u32 cid;
u8 poll;
#define CLIENT_IS_LEADING_RSS 0x02
u8 flags;
};
struct bnx2x_client_init_params {
struct rxq_pause_params pause;
struct bnx2x_rxq_init_params rxq_params;
struct bnx2x_txq_init_params txq_params;
struct bnx2x_client_ramrod_params ramrod_params;
};
struct bnx2x_rss_params {
int mode;
u16 cap;
u16 result_mask;
};
struct bnx2x_func_init_params {
/* rss */
struct bnx2x_rss_params *rss; /* valid iff FUNC_FLG_RSS */
/* dma */
dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
u16 func_flgs;
u16 func_id; /* abs fid */
u16 pf_id;
u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
};
#define for_each_queue(bp, var) \ #define for_each_queue(bp, var) \
for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
#define for_each_nondefault_queue(bp, var) \ #define for_each_nondefault_queue(bp, var) \
@ -957,6 +1298,38 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
return val; return val;
} }
#define BNX2X_ILT_ZALLOC(x, y, size) \
do { \
x = pci_alloc_consistent(bp->pdev, size, y); \
if (x) \
memset(x, 0, size); \
} while (0)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
if (x) { \
pci_free_consistent(bp->pdev, size, x, y); \
x = NULL; \
y = 0; \
} \
} while (0)
#define ILOG2(x) (ilog2((x)))
#define ILT_NUM_PAGE_ENTRIES (3072)
/* In 57710/11 we use whole table since we have 8 func
*/
#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)
#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
/*
* the phys address is shifted right 12 bits and has an added
* 1=valid bit added to the 53rd bit
* then since this is a wide register(TM)
* we split it into two 32 bit writes
*/
#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
/* load/unload mode */ /* load/unload mode */
@ -1032,7 +1405,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
#define BNX2X_BTR 1 #define BNX2X_BTR 4
#define MAX_SPQ_PENDING 8 #define MAX_SPQ_PENDING 8
@ -1149,20 +1522,22 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
#define MULTI_MASK 0x7f #define MULTI_MASK 0x7f
#define DEF_USB_FUNC_OFF (2 + 2*HC_USTORM_DEF_SB_NUM_INDICES)
#define DEF_CSB_FUNC_OFF (2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES)
#define DEF_XSB_FUNC_OFF (2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES)
#define DEF_TSB_FUNC_OFF (2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES)
#define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH
#define BNX2X_SP_DSB_INDEX \ #define BNX2X_SP_DSB_INDEX \
(&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX]) (&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_ETH_DEF_CONS])
#define SET_FLAG(value, mask, flag) \
do {\
(value) &= ~(mask);\
(value) |= ((flag) << (mask##_SHIFT));\
} while (0)
#define GET_FLAG(value, mask) \
(((value) &= (mask)) >> (mask##_SHIFT))
#define CAM_IS_INVALID(x) \ #define CAM_IS_INVALID(x) \
(x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE) (GET_FLAG(x.flags, \
MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
(T_ETH_MAC_COMMAND_INVALIDATE))
#define CAM_INVALIDATE(x) \ #define CAM_INVALIDATE(x) \
(x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE) (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
@ -1181,6 +1556,14 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128 #define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4 #define VENDOR_ID_LEN 4
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
#define HC_SEG_ACCESS_ATTN 4
#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/
#ifdef BNX2X_MAIN #ifdef BNX2X_MAIN
#define BNX2X_EXTERN #define BNX2X_EXTERN
#else #else
@ -1195,4 +1578,9 @@ extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx); void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags);
#endif /* bnx2x.h */ #endif /* bnx2x.h */

Просмотреть файл

@ -27,6 +27,8 @@
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#endif #endif
#include "bnx2x_init.h"
static int bnx2x_poll(struct napi_struct *napi, int budget); static int bnx2x_poll(struct napi_struct *napi, int budget);
/* free skb in the packet ring at pos idx /* free skb in the packet ring at pos idx
@ -190,14 +192,16 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
/* First mark all used pages */ /* First mark all used pages */
for (i = 0; i < sge_len; i++) for (i = 0; i < sge_len; i++)
SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i]))); SGE_MASK_CLEAR_BIT(fp,
RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
/* Here we assume that the last SGE index is the biggest */ /* Here we assume that the last SGE index is the biggest */
prefetch((void *)(fp->sge_mask)); prefetch((void *)(fp->sge_mask));
bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); bnx2x_update_last_max_sge(fp,
le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
last_max = RX_SGE(fp->last_max_sge); last_max = RX_SGE(fp->last_max_sge);
last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
@ -298,7 +302,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Run through the SGL and compose the fragmented skb */ /* Run through the SGL and compose the fragmented skb */
for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j])); u16 sge_idx =
RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
/* FW gives the indices of the SGE as if the ring is an array /* FW gives the indices of the SGE as if the ring is an array
(meaning that "next" element will consume 2 indices) */ (meaning that "next" element will consume 2 indices) */
@ -394,8 +399,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (!bnx2x_fill_frag_skb(bp, fp, skb, if (!bnx2x_fill_frag_skb(bp, fp, skb,
&cqe->fast_path_cqe, cqe_idx)) { &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN #ifdef BCM_VLAN
if ((bp->vlgrp != NULL) && is_vlan_cqe && if ((bp->vlgrp != NULL) &&
(!is_not_hwaccel_vlan_cqe)) (le16_to_cpu(cqe->fast_path_cqe.
pars_flags.flags) & PARSING_FLAGS_VLAN))
vlan_gro_receive(&fp->napi, bp->vlgrp, vlan_gro_receive(&fp->napi, bp->vlgrp,
le16_to_cpu(cqe->fast_path_cqe. le16_to_cpu(cqe->fast_path_cqe.
vlan_tag), skb); vlan_tag), skb);
@ -686,9 +692,10 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
fp->index, fp->sb_id); "[fp %d fw_sd %d igusb %d]\n",
bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); fp->index, fp->fw_sb_id, fp->igu_sb_id);
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
#ifdef BNX2X_STOP_ON_ERROR #ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic)) if (unlikely(bp->panic))
@ -698,8 +705,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
/* Handle Rx and Tx according to MSI-X vector */ /* Handle Rx and Tx according to MSI-X vector */
prefetch(fp->rx_cons_sb); prefetch(fp->rx_cons_sb);
prefetch(fp->tx_cons_sb); prefetch(fp->tx_cons_sb);
prefetch(&fp->status_blk->u_status_block.status_block_index); prefetch(&fp->sb_running_index[SM_RX_ID]);
prefetch(&fp->status_blk->c_status_block.status_block_index);
napi_schedule(&bnx2x_fp(bp, fp->index, napi)); napi_schedule(&bnx2x_fp(bp, fp->index, napi));
return IRQ_HANDLED; return IRQ_HANDLED;
@ -774,27 +780,73 @@ void bnx2x_link_report(struct bnx2x *bp)
} }
} }
void bnx2x_init_rx_rings(struct bnx2x *bp) /* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
int rx_ring_size)
{ {
int func = BP_FUNC(bp); struct bnx2x *bp = fp->bp;
int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
ETH_MAX_AGGREGATION_QUEUES_E1H;
u16 ring_prod, cqe_ring_prod; u16 ring_prod, cqe_ring_prod;
int i, j; int i;
fp->rx_comp_cons = 0;
cqe_ring_prod = ring_prod = 0;
for (i = 0; i < rx_ring_size; i++) {
if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
BNX2X_ERR("was only able to allocate "
"%d rx skbs on queue[%d]\n", i, fp->index);
fp->eth_q_stats.rx_skb_alloc_failed++;
break;
}
ring_prod = NEXT_RX_IDX(ring_prod);
cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
WARN_ON(ring_prod <= i);
}
fp->rx_bd_prod = ring_prod;
/* Limit the CQE producer by the CQE ring size */
fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
return i;
}
static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
{
struct bnx2x *bp = fp->bp;
int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size : int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
MAX_RX_AVAIL/bp->num_queues; MAX_RX_AVAIL/bp->num_queues;
rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size); rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN; bnx2x_alloc_rx_bds(fp, rx_ring_size);
/* Warning!
* this will generate an interrupt (to the TSTORM)
* must only be done after chip is initialized
*/
bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
fp->rx_sge_prod);
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
ETH_MAX_AGGREGATION_QUEUES_E1H;
u16 ring_prod;
int i, j;
bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
BNX2X_FW_IP_HDR_ALIGN_PAD;
DP(NETIF_MSG_IFUP, DP(NETIF_MSG_IFUP,
"mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
if (bp->flags & TPA_ENABLE_FLAG) { for_each_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
for_each_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
if (!fp->disable_tpa) {
for (i = 0; i < max_agg_queues; i++) { for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb = fp->tpa_pool[i].skb =
netdev_alloc_skb(bp->dev, bp->rx_buf_size); netdev_alloc_skb(bp->dev, bp->rx_buf_size);
@ -812,6 +864,35 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
mapping, 0); mapping, 0);
fp->tpa_state[i] = BNX2X_TPA_STOP; fp->tpa_state[i] = BNX2X_TPA_STOP;
} }
/* "next page" elements initialization */
bnx2x_set_next_page_sgl(fp);
/* set SGEs bit mask */
bnx2x_init_sge_ring_bit_mask(fp);
/* Allocate SGEs and initialize the ring elements */
for (i = 0, ring_prod = 0;
i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
BNX2X_ERR("was only able to allocate "
"%d rx sges\n", i);
BNX2X_ERR("disabling TPA for"
" queue[%d]\n", j);
/* Cleanup already allocated elements */
bnx2x_free_rx_sge_range(bp,
fp, ring_prod);
bnx2x_free_tpa_pool(bp,
fp, max_agg_queues);
fp->disable_tpa = 1;
ring_prod = 0;
break;
}
ring_prod = NEXT_SGE_IDX(ring_prod);
}
fp->rx_sge_prod = ring_prod;
} }
} }
@ -819,98 +900,15 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct bnx2x_fastpath *fp = &bp->fp[j]; struct bnx2x_fastpath *fp = &bp->fp[j];
fp->rx_bd_cons = 0; fp->rx_bd_cons = 0;
fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
/* "next page" elements initialization */ bnx2x_set_next_page_rx_bd(fp);
/* SGE ring */
for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
struct eth_rx_sge *sge;
sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
sge->addr_hi =
cpu_to_le32(U64_HI(fp->rx_sge_mapping +
BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
sge->addr_lo =
cpu_to_le32(U64_LO(fp->rx_sge_mapping +
BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
}
bnx2x_init_sge_ring_bit_mask(fp);
/* RX BD ring */
for (i = 1; i <= NUM_RX_RINGS; i++) {
struct eth_rx_bd *rx_bd;
rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
rx_bd->addr_hi =
cpu_to_le32(U64_HI(fp->rx_desc_mapping +
BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
rx_bd->addr_lo =
cpu_to_le32(U64_LO(fp->rx_desc_mapping +
BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
}
/* CQ ring */ /* CQ ring */
for (i = 1; i <= NUM_RCQ_RINGS; i++) { bnx2x_set_next_page_rx_cq(fp);
struct eth_rx_cqe_next_page *nextpg;
nextpg = (struct eth_rx_cqe_next_page *)
&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
nextpg->addr_hi =
cpu_to_le32(U64_HI(fp->rx_comp_mapping +
BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
nextpg->addr_lo =
cpu_to_le32(U64_LO(fp->rx_comp_mapping +
BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
}
/* Allocate SGEs and initialize the ring elements */
for (i = 0, ring_prod = 0;
i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
BNX2X_ERR("was only able to allocate "
"%d rx sges\n", i);
BNX2X_ERR("disabling TPA for queue[%d]\n", j);
/* Cleanup already allocated elements */
bnx2x_free_rx_sge_range(bp, fp, ring_prod);
bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
fp->disable_tpa = 1;
ring_prod = 0;
break;
}
ring_prod = NEXT_SGE_IDX(ring_prod);
}
fp->rx_sge_prod = ring_prod;
/* Allocate BDs and initialize BD ring */ /* Allocate BDs and initialize BD ring */
fp->rx_comp_cons = 0; bnx2x_alloc_rx_bd_ring(fp);
cqe_ring_prod = ring_prod = 0;
for (i = 0; i < rx_ring_size; i++) {
if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
BNX2X_ERR("was only able to allocate "
"%d rx skbs on queue[%d]\n", i, j);
fp->eth_q_stats.rx_skb_alloc_failed++;
break;
}
ring_prod = NEXT_RX_IDX(ring_prod);
cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
WARN_ON(ring_prod <= i);
}
fp->rx_bd_prod = ring_prod;
/* must not have more available CQEs than BDs */
fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
/* Warning!
* this will generate an interrupt (to the TSTORM)
* must only be done after chip is initialized
*/
bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
fp->rx_sge_prod);
if (j != 0) if (j != 0)
continue; continue;
@ -921,6 +919,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
U64_HI(fp->rx_comp_mapping)); U64_HI(fp->rx_comp_mapping));
} }
} }
static void bnx2x_free_tx_skbs(struct bnx2x *bp) static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{ {
@ -1252,6 +1251,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (rc) if (rc)
return rc; return rc;
/* must be called before memory allocation and HW init */
bnx2x_ilt_set_info(bp);
if (bnx2x_alloc_mem(bp)) { if (bnx2x_alloc_mem(bp)) {
bnx2x_free_irq(bp, true); bnx2x_free_irq(bp, true);
return -ENOMEM; return -ENOMEM;
@ -1339,6 +1341,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
goto load_error2; goto load_error2;
} }
if (rc) {
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
goto load_error2;
}
/* Setup NIC internals and enable interrupts */ /* Setup NIC internals and enable interrupts */
bnx2x_nic_init(bp, load_code); bnx2x_nic_init(bp, load_code);
@ -1360,7 +1367,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
rc = bnx2x_setup_leading(bp); rc = bnx2x_func_start(bp);
if (rc) {
BNX2X_ERR("Function start failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
goto load_error3;
#else
bp->panic = 1;
return -EBUSY;
#endif
}
rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
if (rc) { if (rc) {
BNX2X_ERR("Setup leading failed!\n"); BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR #ifndef BNX2X_STOP_ON_ERROR
@ -1377,56 +1395,53 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->flags |= MF_FUNC_DIS; bp->flags |= MF_FUNC_DIS;
} }
if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC #ifdef BCM_CNIC
/* Enable Timer scan */ /* Enable Timer scan */
REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif #endif
for_each_nondefault_queue(bp, i) { for_each_nondefault_queue(bp, i) {
rc = bnx2x_setup_multi(bp, i); rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
if (rc) if (rc)
#ifdef BCM_CNIC #ifdef BCM_CNIC
goto load_error4; goto load_error4;
#else #else
goto load_error3; goto load_error3;
#endif
}
if (CHIP_IS_E1(bp))
bnx2x_set_eth_mac_addr_e1(bp, 1);
else
bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
/* Set iSCSI L2 MAC */
mutex_lock(&bp->cnic_mutex);
if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
bnx2x_set_iscsi_eth_mac_addr(bp, 1);
bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
CNIC_SB_ID(bp));
}
mutex_unlock(&bp->cnic_mutex);
#endif #endif
} }
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
bnx2x_set_eth_mac(bp, 1);
#ifdef BCM_CNIC
/* Set iSCSI L2 MAC */
mutex_lock(&bp->cnic_mutex);
if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
bnx2x_set_iscsi_eth_mac_addr(bp, 1);
bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
bnx2x_init_sb(bp, bp->cnic_sb_mapping,
BNX2X_VF_ID_INVALID, false,
CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
}
mutex_unlock(&bp->cnic_mutex);
#endif
if (bp->port.pmf) if (bp->port.pmf)
bnx2x_initial_phy_init(bp, load_mode); bnx2x_initial_phy_init(bp, load_mode);
/* Start fast path */ /* Start fast path */
switch (load_mode) { switch (load_mode) {
case LOAD_NORMAL: case LOAD_NORMAL:
if (bp->state == BNX2X_STATE_OPEN) { /* Tx queue should be only reenabled */
/* Tx queue should be only reenabled */ netif_tx_wake_all_queues(bp->dev);
netif_tx_wake_all_queues(bp->dev);
}
/* Initialize the receive filter. */ /* Initialize the receive filter. */
bnx2x_set_rx_mode(bp->dev); bnx2x_set_rx_mode(bp->dev);
break; break;
case LOAD_OPEN: case LOAD_OPEN:
netif_tx_start_all_queues(bp->dev); netif_tx_start_all_queues(bp->dev);
if (bp->state != BNX2X_STATE_OPEN) smp_mb__after_clear_bit();
netif_tx_disable(bp->dev);
/* Initialize the receive filter. */ /* Initialize the receive filter. */
bnx2x_set_rx_mode(bp->dev); bnx2x_set_rx_mode(bp->dev);
break; break;
@ -1512,21 +1527,22 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bp->rx_mode = BNX2X_RX_MODE_NONE; bp->rx_mode = BNX2X_RX_MODE_NONE;
bnx2x_set_storm_rx_mode(bp); bnx2x_set_storm_rx_mode(bp);
/* Disable HW interrupts, NAPI and Tx */
bnx2x_netif_stop(bp, 1);
netif_carrier_off(bp->dev);
del_timer_sync(&bp->timer); del_timer_sync(&bp->timer);
SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
(DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/* Release IRQs */
bnx2x_free_irq(bp, false);
/* Cleanup the chip if needed */ /* Cleanup the chip if needed */
if (unload_mode != UNLOAD_RECOVERY) if (unload_mode != UNLOAD_RECOVERY)
bnx2x_chip_cleanup(bp, unload_mode); bnx2x_chip_cleanup(bp, unload_mode);
else {
/* Disable HW interrupts, NAPI and Tx */
bnx2x_netif_stop(bp, 1);
/* Release IRQs */
bnx2x_free_irq(bp, false);
}
bp->port.pmf = 0; bp->port.pmf = 0;
@ -1634,27 +1650,28 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
/* Fall out from the NAPI loop if needed */ /* Fall out from the NAPI loop if needed */
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
bnx2x_update_fpsb_idx(fp); bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block, thus we need /* bnx2x_has_rx_work() reads the status block,
* to ensure that status block indices have been actually read * thus we need to ensure that status block indices
* (bnx2x_update_fpsb_idx) prior to this check * have been actually read (bnx2x_update_fpsb_idx)
* (bnx2x_has_rx_work) so that we won't write the "newer" * prior to this check (bnx2x_has_rx_work) so that
* value of the status block to IGU (if there was a DMA right * we won't write the "newer" value of the status block
* after bnx2x_has_rx_work and if there is no rmb, the memory * to IGU (if there was a DMA right after
* reading (bnx2x_update_fpsb_idx) may be postponed to right * bnx2x_has_rx_work and if there is no rmb, the memory
* before bnx2x_ack_sb). In this case there will never be * reading (bnx2x_update_fpsb_idx) may be postponed
* another interrupt until there is another update of the * to right before bnx2x_ack_sb). In this case there
* status block, while there is still unhandled work. * will never be another interrupt until there is
* another update of the status block, while there
* is still unhandled work.
*/ */
rmb(); rmb();
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
napi_complete(napi); napi_complete(napi);
/* Re-enable interrupts */ /* Re-enable interrupts */
bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, DP(NETIF_MSG_HW,
le16_to_cpu(fp->fp_c_idx), "Update index to %d\n", fp->fp_hc_idx);
IGU_INT_NOP, 1); bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, le16_to_cpu(fp->fp_hc_idx),
le16_to_cpu(fp->fp_u_idx),
IGU_INT_ENABLE, 1); IGU_INT_ENABLE, 1);
break; break;
} }
@ -1850,7 +1867,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct sw_tx_bd *tx_buf; struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd; struct eth_tx_start_bd *tx_start_bd;
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd *pbd = NULL; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
u16 pkt_prod, bd_prod; u16 pkt_prod, bd_prod;
int nbd, fp_index; int nbd, fp_index;
dma_addr_t mapping; dma_addr_t mapping;
@ -1926,10 +1943,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_start_bd->general_data = (mac_type << SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); ETH_TX_START_BD_ETH_ADDR_TYPE,
mac_type);
/* header nbd */ /* header nbd */
tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_HDR_NBDS,
1);
/* remember the first BD of the packet */ /* remember the first BD of the packet */
tx_buf->first_bd = fp->tx_bd_prod; tx_buf->first_bd = fp->tx_bd_prod;
@ -1943,34 +1963,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef BCM_VLAN #ifdef BCM_VLAN
if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
(bp->flags & HW_VLAN_TX_FLAG)) { (bp->flags & HW_VLAN_TX_FLAG)) {
tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); tx_start_bd->vlan_or_ethertype =
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; cpu_to_le16(vlan_tx_tag_get(skb));
tx_start_bd->bd_flags.as_bitfield |=
(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
} else } else
#endif #endif
tx_start_bd->vlan = cpu_to_le16(pkt_prod); tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
/* turn on parsing and get a BD */ /* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
if (xmit_type & XMIT_CSUM) { if (xmit_type & XMIT_CSUM) {
hlen = (skb_network_header(skb) - skb->data) / 2;
/* for now NS flag is not used in Linux */
pbd->global_data =
(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
pbd->ip_hlen = (skb_transport_header(skb) -
skb_network_header(skb)) / 2;
hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
pbd->total_hlen = cpu_to_le16(hlen);
hlen = hlen*2;
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
if (xmit_type & XMIT_CSUM_V4) if (xmit_type & XMIT_CSUM_V4)
@ -1980,25 +1984,47 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |= tx_start_bd->bd_flags.as_bitfield |=
ETH_TX_BD_FLAGS_IPV6; ETH_TX_BD_FLAGS_IPV6;
if (!(xmit_type & XMIT_CSUM_TCP))
tx_start_bd->bd_flags.as_bitfield |=
ETH_TX_BD_FLAGS_IS_UDP;
}
pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM) {
hlen = (skb_network_header(skb) - skb->data) / 2;
/* for now NS flag is not used in Linux */
pbd_e1x->global_data =
(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
pbd_e1x->ip_hlen_w = (skb_transport_header(skb) -
skb_network_header(skb)) / 2;
hlen += pbd_e1x->ip_hlen_w + tcp_hdrlen(skb) / 2;
pbd_e1x->total_hlen_w = cpu_to_le16(hlen);
hlen = hlen*2;
if (xmit_type & XMIT_CSUM_TCP) { if (xmit_type & XMIT_CSUM_TCP) {
pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); pbd_e1x->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
} else { } else {
s8 fix = SKB_CS_OFF(skb); /* signed! */ s8 fix = SKB_CS_OFF(skb); /* signed! */
pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
DP(NETIF_MSG_TX_QUEUED, DP(NETIF_MSG_TX_QUEUED,
"hlen %d fix %d csum before fix %x\n", "hlen %d fix %d csum before fix %x\n",
le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb)); le16_to_cpu(pbd_e1x->total_hlen_w),
fix, SKB_CS(skb));
/* HW bug: fixup the CSUM */ /* HW bug: fixup the CSUM */
pbd->tcp_pseudo_csum = pbd_e1x->tcp_pseudo_csum =
bnx2x_csum_fix(skb_transport_header(skb), bnx2x_csum_fix(skb_transport_header(skb),
SKB_CS(skb), fix); SKB_CS(skb), fix);
DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
pbd->tcp_pseudo_csum); pbd_e1x->tcp_pseudo_csum);
} }
} }
@ -2016,7 +2042,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
" nbytes %d flags %x vlan %x\n", " nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); tx_start_bd->bd_flags.as_bitfield,
le16_to_cpu(tx_start_bd->vlan_or_ethertype));
if (xmit_type & XMIT_GSO) { if (xmit_type & XMIT_GSO) {
@ -2031,24 +2058,25 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
hlen, bd_prod, ++nbd); hlen, bd_prod, ++nbd);
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); pbd_e1x->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); pbd_e1x->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
pbd->tcp_flags = pbd_tcp_flags(skb); pbd_e1x->tcp_flags = pbd_tcp_flags(skb);
if (xmit_type & XMIT_GSO_V4) { if (xmit_type & XMIT_GSO_V4) {
pbd->ip_id = swab16(ip_hdr(skb)->id); pbd_e1x->ip_id = swab16(ip_hdr(skb)->id);
pbd->tcp_pseudo_csum = pbd_e1x->tcp_pseudo_csum =
swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0)); 0, IPPROTO_TCP, 0));
} else } else
pbd->tcp_pseudo_csum = pbd_e1x->tcp_pseudo_csum =
swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0)); 0, IPPROTO_TCP, 0));
pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN; pbd_e1x->global_data |=
ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
} }
tx_data_bd = (struct eth_tx_bd *)tx_start_bd; tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
@ -2088,13 +2116,14 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (total_pkt_bd != NULL) if (total_pkt_bd != NULL)
total_pkt_bd->total_pkt_bytes = pkt_size; total_pkt_bd->total_pkt_bytes = pkt_size;
if (pbd) if (pbd_e1x)
DP(NETIF_MSG_TX_QUEUED, DP(NETIF_MSG_TX_QUEUED,
"PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
" tcp_flags %x xsum %x seq %u hlen %u\n", " tcp_flags %x xsum %x seq %u hlen %u\n",
pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen)); pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
le16_to_cpu(pbd_e1x->total_hlen_w));
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
@ -2109,7 +2138,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
fp->tx_db.data.prod += nbd; fp->tx_db.data.prod += nbd;
barrier(); barrier();
DOORBELL(bp, fp->index, fp->tx_db.raw); DOORBELL(bp, fp->cid, fp->tx_db.raw);
mmiowb(); mmiowb();
@ -2141,16 +2170,51 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return -EINVAL; return -EINVAL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev)) { if (netif_running(dev))
if (CHIP_IS_E1(bp)) bnx2x_set_eth_mac(bp, 1);
bnx2x_set_eth_mac_addr_e1(bp, 1);
else
bnx2x_set_eth_mac_addr_e1h(bp, 1);
}
return 0; return 0;
} }
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
kfree(bp->fp);
kfree(bp->msix_table);
kfree(bp->ilt);
}
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
struct bnx2x_fastpath *fp;
struct msix_entry *tbl;
struct bnx2x_ilt *ilt;
/* fp array */
fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
bp->fp = fp;
/* msix table */
tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
GFP_KERNEL);
if (!tbl)
goto alloc_err;
bp->msix_table = tbl;
/* ilt */
ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
if (!ilt)
goto alloc_err;
bp->ilt = ilt;
return 0;
alloc_err:
bnx2x_free_mem_bp(bp);
return -ENOMEM;
}
/* called with rtnl_lock */ /* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu) int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{ {
@ -2200,18 +2264,6 @@ void bnx2x_vlan_rx_register(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev); struct bnx2x *bp = netdev_priv(dev);
bp->vlgrp = vlgrp; bp->vlgrp = vlgrp;
/* Set flags according to the required capabilities */
bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
if (dev->features & NETIF_F_HW_VLAN_TX)
bp->flags |= HW_VLAN_TX_FLAG;
if (dev->features & NETIF_F_HW_VLAN_RX)
bp->flags |= HW_VLAN_RX_FLAG;
if (netif_running(dev))
bnx2x_set_client_config(bp);
} }
#endif #endif

Просмотреть файл

@ -106,6 +106,13 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
*/ */
void bnx2x_int_enable(struct bnx2x *bp); void bnx2x_int_enable(struct bnx2x *bp);
/**
* Disable HW interrupts.
*
* @param bp
*/
void bnx2x_int_disable(struct bnx2x *bp);
/** /**
* Disable interrupts. This function ensures that there are no * Disable interrupts. This function ensures that there are no
* ISRs or SP DPCs (sp_task) are running after it returns. * ISRs or SP DPCs (sp_task) are running after it returns.
@ -163,27 +170,30 @@ int bnx2x_alloc_mem(struct bnx2x *bp);
void bnx2x_free_mem(struct bnx2x *bp); void bnx2x_free_mem(struct bnx2x *bp);
/** /**
* Bring up a leading (the first) eth Client. * Setup eth Client.
*
* @param bp
*
* @return int
*/
int bnx2x_setup_leading(struct bnx2x *bp);
/**
* Setup non-leading eth Client.
* *
* @param bp * @param bp
* @param fp * @param fp
* @param is_leading
* *
* @return int * @return int
*/ */
int bnx2x_setup_multi(struct bnx2x *bp, int index); int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int is_leading);
/** /**
* Set number of quueus according to mode and number of available * Bring down an eth client.
* msi-x vectors *
* @param bp
* @param p
*
* @return int
*/
int bnx2x_stop_fw_client(struct bnx2x *bp,
struct bnx2x_client_ramrod_params *p);
/**
* Set number of quueus according to mode
* *
* @param bp * @param bp
* *
@ -228,16 +238,7 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
* @param bp driver handle * @param bp driver handle
* @param set * @param set
*/ */
void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set); void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
/**
* Configure eth MAC address in the HW according to the value in
* netdev->dev_addr for 57710
*
* @param bp driver handle
* @param set
*/
void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);
#ifdef BCM_CNIC #ifdef BCM_CNIC
/** /**
@ -257,12 +258,15 @@ int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
* Initialize status block in FW and HW * Initialize status block in FW and HW
* *
* @param bp driver handle * @param bp driver handle
* @param sb host_status_block
* @param dma_addr_t mapping * @param dma_addr_t mapping
* @param int sb_id * @param int sb_id
* @param int vfid
* @param u8 vf_valid
* @param int fw_sb_id
* @param int igu_sb_id
*/ */
void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
dma_addr_t mapping, int sb_id); u8 vf_valid, int fw_sb_id, int igu_sb_id);
/** /**
* Reconfigure FW/HW according to dev->flags rx mode * Reconfigure FW/HW according to dev->flags rx mode
@ -294,14 +298,6 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp);
*/ */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/**
* Configures FW with client paramteres (like HW VLAN removal)
* for each active client.
*
* @param bp
*/
void bnx2x_set_client_config(struct bnx2x *bp);
/** /**
* Handle sp events * Handle sp events
* *
@ -310,14 +306,29 @@ void bnx2x_set_client_config(struct bnx2x *bp);
*/ */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
/**
* Init/halt function before/after sending
* CLIENT_SETUP/CFC_DEL for the first/last client.
*
* @param bp
*
* @return int
*/
int bnx2x_func_start(struct bnx2x *bp);
int bnx2x_func_stop(struct bnx2x *bp);
/**
* Prepare ILT configurations according to current driver
* parameters.
*
* @param bp
*/
void bnx2x_ilt_set_info(struct bnx2x *bp);
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{ {
struct host_status_block *fpsb = fp->status_blk;
barrier(); /* status block is written to by the chip */ barrier(); /* status block is written to by the chip */
fp->fp_c_idx = fpsb->c_status_block.status_block_index; fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
fp->fp_u_idx = fpsb->u_status_block.status_block_index;
} }
static inline void bnx2x_update_rx_prod(struct bnx2x *bp, static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
@ -344,8 +355,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
wmb(); wmb();
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++) for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
REG_WR(bp, BAR_USTRORM_INTMEM + REG_WR(bp,
USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4, BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
((u32 *)&rx_prods)[i]); ((u32 *)&rx_prods)[i]);
mmiowb(); /* keep prod updates ordered */ mmiowb(); /* keep prod updates ordered */
@ -434,6 +445,17 @@ static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
return hw_cons != fp->tx_pkt_cons; return hw_cons != fp->tx_pkt_cons;
} }
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
u16 rx_cons_sb;
/* Tell compiler that status block fields can change */
barrier();
rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
rx_cons_sb++;
return (fp->rx_comp_cons != rx_cons_sb);
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp, static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
struct bnx2x_fastpath *fp, u16 index) struct bnx2x_fastpath *fp, u16 index)
{ {
@ -454,13 +476,35 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
sge->addr_lo = 0; sge->addr_lo = 0;
} }
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
struct bnx2x_fastpath *fp, int last)
{
int i;
for (i = 0; i < last; i++)
bnx2x_free_rx_sge(bp, fp, i);
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
int i, j;
for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
int idx = RX_SGE_CNT * i - 1;
for (j = 0; j < 2; j++) {
SGE_MASK_CLEAR_BIT(fp, idx);
idx--;
}
}
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
memset(fp->sge_mask, 0xff,
(NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
/* Clear the two last indices in the page to 1:
these are the indices that correspond to the "next" element,
hence will never be indicated and should be removed from
the calculations. */
bnx2x_clear_sge_mask_next_elems(fp);
} }
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@ -540,33 +584,15 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
dma_unmap_addr(cons_rx_buf, mapping)); dma_unmap_addr(cons_rx_buf, mapping));
*prod_bd = *cons_bd; *prod_bd = *cons_bd;
} }
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) struct bnx2x_fastpath *fp, int last)
{ {
int i, j; int i;
for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { for (i = 0; i < last; i++)
int idx = RX_SGE_CNT * i - 1; bnx2x_free_rx_sge(bp, fp, i);
for (j = 0; j < 2; j++) {
SGE_MASK_CLEAR_BIT(fp, idx);
idx--;
}
}
} }
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
memset(fp->sge_mask, 0xff,
(NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
/* Clear the two last indices in the page to 1:
these are the indices that correspond to the "next" element,
hence will never be indicated and should be removed from
the calculations. */
bnx2x_clear_sge_mask_next_elems(fp);
}
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
struct bnx2x_fastpath *fp, int last) struct bnx2x_fastpath *fp, int last)
{ {
@ -592,7 +618,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
} }
static inline void bnx2x_init_tx_ring(struct bnx2x *bp) static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{ {
int i, j; int i, j;
@ -611,7 +637,7 @@ static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
} }
fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE; SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
fp->tx_db.data.zero_fill1 = 0; fp->tx_db.data.zero_fill1 = 0;
fp->tx_db.data.prod = 0; fp->tx_db.data.prod = 0;
@ -619,22 +645,94 @@ static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
fp->tx_pkt_cons = 0; fp->tx_pkt_cons = 0;
fp->tx_bd_prod = 0; fp->tx_bd_prod = 0;
fp->tx_bd_cons = 0; fp->tx_bd_cons = 0;
fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
fp->tx_pkt = 0; fp->tx_pkt = 0;
} }
} }
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{ {
u16 rx_cons_sb; int i;
/* Tell compiler that status block fields can change */ for (i = 1; i <= NUM_RX_RINGS; i++) {
barrier(); struct eth_rx_bd *rx_bd;
rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
rx_cons_sb++; rx_bd->addr_hi =
return fp->rx_comp_cons != rx_cons_sb; cpu_to_le32(U64_HI(fp->rx_desc_mapping +
BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
rx_bd->addr_lo =
cpu_to_le32(U64_LO(fp->rx_desc_mapping +
BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
}
} }
static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
int i;
for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
struct eth_rx_sge *sge;
sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
sge->addr_hi =
cpu_to_le32(U64_HI(fp->rx_sge_mapping +
BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
sge->addr_lo =
cpu_to_le32(U64_LO(fp->rx_sge_mapping +
BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
}
}
static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
int i;
for (i = 1; i <= NUM_RCQ_RINGS; i++) {
struct eth_rx_cqe_next_page *nextpg;
nextpg = (struct eth_rx_cqe_next_page *)
&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
nextpg->addr_hi =
cpu_to_le32(U64_HI(fp->rx_comp_mapping +
BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
nextpg->addr_lo =
cpu_to_le32(U64_LO(fp->rx_comp_mapping +
BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
}
}
static inline void __storm_memset_struct(struct bnx2x *bp,
u32 addr, size_t size, u32 *data)
{
int i;
for (i = 0; i < size/4; i++)
REG_WR(bp, addr + (i * 4), data[i]);
}
static inline void storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters,
u16 abs_fid)
{
size_t size = sizeof(struct tstorm_eth_mac_filter_config);
u32 addr = BAR_TSTRORM_INTMEM +
TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);
__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}
static inline void storm_memset_cmng(struct bnx2x *bp,
struct cmng_struct_per_port *cmng,
u8 port)
{
size_t size = sizeof(struct cmng_struct_per_port);
u32 addr = BAR_XSTRORM_INTMEM +
XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
__storm_memset_struct(bp, addr, size, (u32 *)cmng);
}
/* HW Lock for shared dual port PHYs */ /* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp); void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp); void bnx2x_release_phy_lock(struct bnx2x *bp);
@ -659,4 +757,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
int bnx2x_nic_load(struct bnx2x *bp, int load_mode); int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
/**
* Allocate/release memories outsize main driver structure
*
* @param bp
*
* @return int
*/
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
void bnx2x_free_mem_bp(struct bnx2x *bp);
#define BNX2X_FW_IP_HDR_ALIGN_PAD 2 /* FW places hdr with this padding */
#endif /* BNX2X_CMN_H */ #endif /* BNX2X_CMN_H */

Просмотреть файл

@ -1343,7 +1343,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
u16 pkt_prod, bd_prod; u16 pkt_prod, bd_prod;
struct sw_tx_bd *tx_buf; struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd; struct eth_tx_start_bd *tx_start_bd;
struct eth_tx_parse_bd *pbd = NULL; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
dma_addr_t mapping; dma_addr_t mapping;
union eth_rx_cqe *cqe; union eth_rx_cqe *cqe;
u8 cqe_fp_flags; u8 cqe_fp_flags;
@ -1399,16 +1399,20 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
tx_start_bd->vlan = cpu_to_le16(pkt_prod); tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_start_bd->general_data = ((UNICAST_ADDRESS << SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1); ETH_TX_START_BD_ETH_ADDR_TYPE,
UNICAST_ADDRESS);
SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_HDR_NBDS,
1);
/* turn on parsing and get a BD */ /* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd; pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
wmb(); wmb();
@ -1578,9 +1582,9 @@ static int bnx2x_test_intr(struct bnx2x *bp)
bp->set_mac_pending++; bp->set_mac_pending++;
smp_wmb(); smp_wmb();
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)), U64_HI(bnx2x_sp_mapping(bp, mac_config)),
U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
if (rc == 0) { if (rc == 0) {
for (i = 0; i < 10; i++) { for (i = 0; i < 10; i++) {
if (!bp->set_mac_pending) if (!bp->set_mac_pending)

Просмотреть файл

@ -7,369 +7,272 @@
* the Free Software Foundation. * the Free Software Foundation.
*/ */
#ifndef BNX2X_FW_DEFS_H
#define BNX2X_FW_DEFS_H
#define CSTORM_ASSERT_LIST_INDEX_OFFSET \ #define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base)
(IS_E1H_OFFSET ? 0x7000 : 0x1000) #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
#define CSTORM_ASSERT_LIST_OFFSET(idx) \ (IRO[141].base + ((assertListEntry) * IRO[141].m1))
(IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) #define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
#define CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(function, index) \ (IRO[144].base + ((pfId) * IRO[144].m1))
(IS_E1H_OFFSET ? (0x8622 + ((function>>1) * 0x40) + \ #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
((function&1) * 0x100) + (index * 0x4)) : (0x3562 + (function * \ (IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \
0x40) + (index * 0x4))) IRO[149].m2))
#define CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(function, index) \ #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x8822 + ((function>>1) * 0x80) + \ (IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \
((function&1) * 0x200) + (index * 0x4)) : (0x35e2 + (function * \ IRO[150].m2))
0x80) + (index * 0x4))) #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
#define CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(function) \ (IRO[156].base + ((funcId) * IRO[156].m1))
(IS_E1H_OFFSET ? (0x8600 + ((function>>1) * 0x40) + \ #define CSTORM_FUNC_EN_OFFSET(funcId) \
((function&1) * 0x100)) : (0x3540 + (function * 0x40))) (IRO[146].base + ((funcId) * IRO[146].m1))
#define CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(function) \ #define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base)
(IS_E1H_OFFSET ? (0x8800 + ((function>>1) * 0x80) + \ #define CSTORM_IGU_MODE_OFFSET (IRO[154].base)
((function&1) * 0x200)) : (0x35c0 + (function * 0x80))) #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(function) \ (IRO[311].base + ((pfId) * IRO[311].m1))
(IS_E1H_OFFSET ? (0x8608 + ((function>>1) * 0x40) + \ #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
((function&1) * 0x100)) : (0x3548 + (function * 0x40))) (IRO[312].base + ((pfId) * IRO[312].m1))
#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(function) \ #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
(IS_E1H_OFFSET ? (0x8808 + ((function>>1) * 0x80) + \ (IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \
((function&1) * 0x200)) : (0x35c8 + (function * 0x80))) IRO[304].m2))
#define CSTORM_FUNCTION_MODE_OFFSET \ #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
(IS_E1H_OFFSET ? 0x11e8 : 0xffffffff) (IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \
#define CSTORM_HC_BTR_C_OFFSET(port) \ IRO[306].m2))
(IS_E1H_OFFSET ? (0x8c04 + (port * 0xf0)) : (0x36c4 + (port * 0xc0))) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
#define CSTORM_HC_BTR_U_OFFSET(port) \ (IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \
(IS_E1H_OFFSET ? (0x8de4 + (port * 0xf0)) : (0x3844 + (port * 0xc0))) IRO[305].m2))
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(function) \ #define \
(IS_E1H_OFFSET ? (0x6680 + (function * 0x8)) : (0x25a0 + \ CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
(function * 0x8))) (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \
#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \ IRO[307].m2))
(IS_E1H_OFFSET ? (0x66c0 + (function * 0x8)) : (0x25b0 + \ #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
(function * 0x8))) (IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \
#define CSTORM_ISCSI_EQ_CONS_OFFSET(function, eqIdx) \ IRO[303].m2))
(IS_E1H_OFFSET ? (0x6040 + (function * 0xc0) + (eqIdx * 0x18)) : \ #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
(0x2410 + (function * 0xc0) + (eqIdx * 0x18))) (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(function, eqIdx) \ IRO[309].m2))
(IS_E1H_OFFSET ? (0x6044 + (function * 0xc0) + (eqIdx * 0x18)) : \ #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
(0x2414 + (function * 0xc0) + (eqIdx * 0x18))) (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(function, eqIdx) \ IRO[308].m2))
(IS_E1H_OFFSET ? (0x604c + (function * 0xc0) + (eqIdx * 0x18)) : \ #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(0x241c + (function * 0xc0) + (eqIdx * 0x18))) (IRO[310].base + ((pfId) * IRO[310].m1))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(function, eqIdx) \ #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x6057 + (function * 0xc0) + (eqIdx * 0x18)) : \ (IRO[302].base + ((pfId) * IRO[302].m1))
(0x2427 + (function * 0xc0) + (eqIdx * 0x18))) #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
#define CSTORM_ISCSI_EQ_PROD_OFFSET(function, eqIdx) \ (IRO[301].base + ((pfId) * IRO[301].m1))
(IS_E1H_OFFSET ? (0x6042 + (function * 0xc0) + (eqIdx * 0x18)) : \ #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(0x2412 + (function * 0xc0) + (eqIdx * 0x18))) (IRO[300].base + ((pfId) * IRO[300].m1))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(function, eqIdx) \ #define CSTORM_PATH_ID_OFFSET (IRO[159].base)
(IS_E1H_OFFSET ? (0x6056 + (function * 0xc0) + (eqIdx * 0x18)) : \ #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
(0x2426 + (function * 0xc0) + (eqIdx * 0x18))) (IRO[137].base + ((pfId) * IRO[137].m1))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(function, eqIdx) \ #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x6054 + (function * 0xc0) + (eqIdx * 0x18)) : \ (IRO[136].base + ((pfId) * IRO[136].m1))
(0x2424 + (function * 0xc0) + (eqIdx * 0x18))) #define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size)
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(function) \ #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x6640 + (function * 0x8)) : (0x2590 + \ (IRO[138].base + ((pfId) * IRO[138].m1))
(function * 0x8))) #define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size)
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ #define CSTORM_STATS_FLAGS_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x2404 + \ (IRO[143].base + ((pfId) * IRO[143].m1))
(function * 0x8))) #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ (IRO[129].base + ((sbId) * IRO[129].m1))
(IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x2402 + \ #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
(function * 0x8))) (IRO[128].base + ((sbId) * IRO[128].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ #define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size)
(IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x2400 + \ #define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
(function * 0x8))) (IRO[132].base + ((sbId) * IRO[132].m1))
#define CSTORM_SB_HC_DISABLE_C_OFFSET(port, cpu_id, index) \ #define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size)
(IS_E1H_OFFSET ? (0x811a + (port * 0x280) + (cpu_id * 0x28) + \ #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
(index * 0x4)) : (0x305a + (port * 0x280) + (cpu_id * 0x28) + \ (IRO[151].base + ((vfId) * IRO[151].m1))
(index * 0x4))) #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
#define CSTORM_SB_HC_DISABLE_U_OFFSET(port, cpu_id, index) \ (IRO[152].base + ((vfId) * IRO[152].m1))
(IS_E1H_OFFSET ? (0xb01a + (port * 0x800) + (cpu_id * 0x80) + \ #define CSTORM_VF_TO_PF_OFFSET(funcId) \
(index * 0x4)) : (0x401a + (port * 0x800) + (cpu_id * 0x80) + \ (IRO[147].base + ((funcId) * IRO[147].m1))
(index * 0x4))) #define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base)
#define CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, cpu_id, index) \ #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x8118 + (port * 0x280) + (cpu_id * 0x28) + \ (IRO[198].base + ((pfId) * IRO[198].m1))
(index * 0x4)) : (0x3058 + (port * 0x280) + (cpu_id * 0x28) + \ #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base)
(index * 0x4))) #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
#define CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, cpu_id, index) \ (IRO[98].base + ((assertListEntry) * IRO[98].m1))
(IS_E1H_OFFSET ? (0xb018 + (port * 0x800) + (cpu_id * 0x80) + \ #define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \
(index * 0x4)) : (0x4018 + (port * 0x800) + (cpu_id * 0x80) + \ (IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \
(index * 0x4))) IRO[197].m2))
#define CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, cpu_id) \ #define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base)
(IS_E1H_OFFSET ? (0x8100 + (port * 0x280) + (cpu_id * 0x28)) : \
(0x3040 + (port * 0x280) + (cpu_id * 0x28)))
#define CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, cpu_id) \
(IS_E1H_OFFSET ? (0xb000 + (port * 0x800) + (cpu_id * 0x80)) : \
(0x4000 + (port * 0x800) + (cpu_id * 0x80)))
#define CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, cpu_id) \
(IS_E1H_OFFSET ? (0x8108 + (port * 0x280) + (cpu_id * 0x28)) : \
(0x3048 + (port * 0x280) + (cpu_id * 0x28)))
#define CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, cpu_id) \
(IS_E1H_OFFSET ? (0xb008 + (port * 0x800) + (cpu_id * 0x80)) : \
(0x4008 + (port * 0x800) + (cpu_id * 0x80)))
#define CSTORM_SB_STATUS_BLOCK_C_SIZE 0x10
#define CSTORM_SB_STATUS_BLOCK_U_SIZE 0x60
#define CSTORM_STATS_FLAGS_OFFSET(function) \
(IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
(function * 0x8)))
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
(IS_E1H_OFFSET ? (0x3200 + (function * 0x20)) : 0xffffffff)
#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
(IS_E1H_OFFSET ? 0xa000 : 0x1000)
#define TSTORM_ASSERT_LIST_OFFSET(idx) \
(IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
(IS_E1H_OFFSET ? (0x33a0 + (port * 0x1a0) + (client_id * 0x10)) \
: (0x9c0 + (port * 0x120) + (client_id * 0x10)))
#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET \
(IS_E1H_OFFSET ? 0x1ed8 : 0xffffffff)
#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ #define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
(IS_E1H_OFFSET ? 0x1eda : 0xffffffff) (IRO[105].base)
#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ #define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \ (IRO[96].base + ((pfId) * IRO[96].m1))
((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ #define TSTORM_FUNC_EN_OFFSET(funcId) \
0x28) + (index * 0x4))) (IRO[101].base + ((funcId) * IRO[101].m1))
#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \ (IRO[195].base + ((pfId) * IRO[195].m1))
((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) #define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base)
#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ #define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \ (IRO[91].base + ((pfId) * IRO[91].m1))
((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) #define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size)
#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ #define \
(IS_E1H_OFFSET ? (0x2940 + (function * 0x8)) : (0x4928 + \ TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \
(function * 0x8))) (IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ * IRO[260].m2))
(IS_E1H_OFFSET ? (0x3000 + (function * 0x40)) : (0x1500 + \ #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(function * 0x40))) (IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_FUNCTION_MODE_OFFSET \ #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
(IS_E1H_OFFSET ? 0x1ed0 : 0xffffffff) (IRO[265].base + ((pfId) * IRO[265].m1))
#define TSTORM_HC_BTR_OFFSET(port) \ #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) (IRO[266].base + ((pfId) * IRO[266].m1))
#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ #define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \ (IRO[267].base + ((pfId) * IRO[267].m1))
(function * 0x80))) #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
#define TSTORM_INDIRECTION_TABLE_SIZE 0x80 (IRO[263].base + ((pfId) * IRO[263].m1))
#define TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(function, pblEntry) \ #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x60c0 + (function * 0x40) + (pblEntry * 0x8)) \ (IRO[262].base + ((pfId) * IRO[262].m1))
: (0x4c30 + (function * 0x40) + (pblEntry * 0x8))) #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \ (IRO[261].base + ((pfId) * IRO[261].m1))
(IS_E1H_OFFSET ? (0x6340 + (function * 0x8)) : (0x4cd0 + \ #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(function * 0x8))) (IRO[259].base + ((pfId) * IRO[259].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x4c04 + \ (IRO[269].base + ((pfId) * IRO[269].m1))
(function * 0x8))) #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ (IRO[256].base + ((pfId) * IRO[256].m1))
(IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x4c02 + \ #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(function * 0x8))) (IRO[257].base + ((pfId) * IRO[257].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x4c00 + \ (IRO[258].base + ((pfId) * IRO[258].m1))
(function * 0x8))) #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(function) \ (IRO[196].base + ((pfId) * IRO[196].m1))
(IS_E1H_OFFSET ? (0x6080 + (function * 0x8)) : (0x4c20 + \ #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \
(function * 0x8))) (IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \ IRO[100].m2))
(IS_E1H_OFFSET ? (0x6040 + (function * 0x8)) : (0x4c10 + \ #define TSTORM_STATS_FLAGS_OFFSET(pfId) \
(function * 0x8))) (IRO[95].base + ((pfId) * IRO[95].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(function) \ #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x6042 + (function * 0x8)) : (0x4c12 + \ (IRO[211].base + ((pfId) * IRO[211].m1))
(function * 0x8))) #define TSTORM_VF_TO_PF_OFFSET(funcId) \
#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(function) \ (IRO[102].base + ((funcId) * IRO[102].m1))
(IS_E1H_OFFSET ? (0x6044 + (function * 0x8)) : (0x4c14 + \ #define USTORM_AGG_DATA_OFFSET (IRO[201].base)
(function * 0x8))) #define USTORM_AGG_DATA_SIZE (IRO[201].size)
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ #define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base)
(IS_E1H_OFFSET ? (0x3008 + (function * 0x40)) : (0x1508 + \ #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(function * 0x40))) (IRO[169].base + ((assertListEntry) * IRO[169].m1))
#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IS_E1H_OFFSET ? (0x2010 + (port * 0x490) + (stats_counter_id * \ (IRO[178].base + ((portId) * IRO[178].m1))
0x40)) : (0x4010 + (port * 0x490) + (stats_counter_id * 0x40))) #define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
#define TSTORM_STATS_FLAGS_OFFSET(function) \ (IRO[172].base + ((pfId) * IRO[172].m1))
(IS_E1H_OFFSET ? (0x29c0 + (function * 0x8)) : (0x4948 + \ #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
(function * 0x8))) (IRO[313].base + ((pfId) * IRO[313].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(function) \ #define USTORM_FUNC_EN_OFFSET(funcId) \
(IS_E1H_OFFSET ? (0x4004 + (function * 0x8)) : (0x1fb4 + \ (IRO[174].base + ((funcId) * IRO[174].m1))
(function * 0x8))) #define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base)
#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa000 : 0x3000) #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2000 : 0x1000) (IRO[277].base + ((pfId) * IRO[277].m1))
#define USTORM_ASSERT_LIST_INDEX_OFFSET \ #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IS_E1H_OFFSET ? 0x8000 : 0x1000) (IRO[278].base + ((pfId) * IRO[278].m1))
#define USTORM_ASSERT_LIST_OFFSET(idx) \ #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) (IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x1010 + (port * 0x680) + (clientId * 0x40)) : \ (IRO[279].base + ((pfId) * IRO[279].m1))
(0x4010 + (port * 0x360) + (clientId * 0x30))) #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
#define USTORM_CQE_PAGE_NEXT_OFFSET(port, clientId) \ (IRO[275].base + ((pfId) * IRO[275].m1))
(IS_E1H_OFFSET ? (0x1028 + (port * 0x680) + (clientId * 0x40)) : \ #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
(0x4028 + (port * 0x360) + (clientId * 0x30))) (IRO[274].base + ((pfId) * IRO[274].m1))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(port) \ #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x2ad4 + (port * 0x8)) : 0xffffffff) (IRO[273].base + ((pfId) * IRO[273].m1))
#define USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, clientId) \ #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x1030 + (port * 0x680) + (clientId * 0x40)) : \ (IRO[276].base + ((pfId) * IRO[276].m1))
0xffffffff) #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ (IRO[280].base + ((pfId) * IRO[280].m1))
(IS_E1H_OFFSET ? (0x2a50 + (function * 0x8)) : (0x1dd0 + \ #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(function * 0x8))) (IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_FUNCTION_MODE_OFFSET \ #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IS_E1H_OFFSET ? 0x2448 : 0xffffffff) (IRO[176].base + ((pfId) * IRO[176].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(function) \ #define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \
(IS_E1H_OFFSET ? (0x7044 + (function * 0x8)) : (0x2414 + \ (IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \
(function * 0x8))) IRO[173].m2))
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \ #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
(IS_E1H_OFFSET ? (0x7046 + (function * 0x8)) : (0x2416 + \ (IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \
(function * 0x8))) IRO[204].m2))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \ #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
(IS_E1H_OFFSET ? (0x7688 + (function * 0x8)) : (0x29c8 + \ (IRO[205].base + ((qzoneId) * IRO[205].m1))
(function * 0x8))) #define USTORM_STATS_FLAGS_OFFSET(pfId) \
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(function) \ (IRO[171].base + ((pfId) * IRO[171].m1))
(IS_E1H_OFFSET ? (0x7648 + (function * 0x8)) : (0x29b8 + \ #define USTORM_TPA_BTR_OFFSET (IRO[202].base)
(function * 0x8))) #define USTORM_TPA_BTR_SIZE (IRO[202].size)
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ #define USTORM_VF_TO_PF_OFFSET(funcId) \
(IS_E1H_OFFSET ? (0x7004 + (function * 0x8)) : (0x2404 + \ (IRO[175].base + ((funcId) * IRO[175].m1))
(function * 0x8))) #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base)
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ #define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base)
(IS_E1H_OFFSET ? (0x7002 + (function * 0x8)) : (0x2402 + \ #define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base)
(function * 0x8))) #define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ (IRO[53].base + ((assertListEntry) * IRO[53].m1))
(IS_E1H_OFFSET ? (0x7000 + (function * 0x8)) : (0x2400 + \ #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
(function * 0x8))) (IRO[47].base + ((portId) * IRO[47].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \ #define XSTORM_E1HOV_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x7040 + (function * 0x8)) : (0x2410 + \ (IRO[55].base + ((pfId) * IRO[55].m1))
(function * 0x8))) #define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(function) \ (IRO[45].base + ((pfId) * IRO[45].m1))
(IS_E1H_OFFSET ? (0x7080 + (function * 0x8)) : (0x2420 + \ #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
(function * 0x8))) (IRO[49].base + ((pfId) * IRO[49].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(function) \ #define XSTORM_FUNC_EN_OFFSET(funcId) \
(IS_E1H_OFFSET ? (0x7084 + (function * 0x8)) : (0x2424 + \ (IRO[51].base + ((funcId) * IRO[51].m1))
(function * 0x8))) #define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base)
#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x1018 + (port * 0x680) + (clientId * 0x40)) : \ (IRO[290].base + ((pfId) * IRO[290].m1))
(0x4018 + (port * 0x360) + (clientId * 0x30))) #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ (IRO[293].base + ((pfId) * IRO[293].m1))
(IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x1da8 + \ #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(function * 0x8))) (IRO[294].base + ((pfId) * IRO[294].m1))
#define USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x2450 + (port * 0x2d0) + (stats_counter_id * \ (IRO[295].base + ((pfId) * IRO[295].m1))
0x28)) : (0x1500 + (port * 0x2d0) + (stats_counter_id * 0x28))) #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
#define USTORM_RX_PRODS_OFFSET(port, client_id) \ (IRO[296].base + ((pfId) * IRO[296].m1))
(IS_E1H_OFFSET ? (0x1000 + (port * 0x680) + (client_id * 0x40)) \ #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
: (0x4000 + (port * 0x360) + (client_id * 0x30))) (IRO[297].base + ((pfId) * IRO[297].m1))
#define USTORM_STATS_FLAGS_OFFSET(function) \ #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x29f0 + (function * 0x8)) : (0x1db8 + \ (IRO[298].base + ((pfId) * IRO[298].m1))
(function * 0x8))) #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
#define USTORM_TPA_BTR_OFFSET (IS_E1H_OFFSET ? 0x3da5 : 0x5095) (IRO[299].base + ((pfId) * IRO[299].m1))
#define USTORM_TPA_BTR_SIZE 0x1 #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
#define XSTORM_ASSERT_LIST_INDEX_OFFSET \ (IRO[289].base + ((pfId) * IRO[289].m1))
(IS_E1H_OFFSET ? 0x9000 : 0x1000) #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
#define XSTORM_ASSERT_LIST_OFFSET(idx) \ (IRO[288].base + ((pfId) * IRO[288].m1))
(IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \ (IRO[287].base + ((pfId) * IRO[287].m1))
(IS_E1H_OFFSET ? (0x24a8 + (port * 0x50)) : (0x3a80 + (port * 0x50))) #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ (IRO[292].base + ((pfId) * IRO[292].m1))
(IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \ #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ (IRO[291].base + ((pfId) * IRO[291].m1))
0x28) + (index * 0x4))) #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ (IRO[286].base + ((pfId) * IRO[286].m1))
(IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \ #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) (IRO[285].base + ((pfId) * IRO[285].m1))
#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \ (IRO[284].base + ((pfId) * IRO[284].m1))
((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
#define XSTORM_E1HOV_OFFSET(function) \ (IRO[283].base + ((pfId) * IRO[283].m1))
(IS_E1H_OFFSET ? (0x2c10 + (function * 0x8)) : 0xffffffff) #define XSTORM_PATH_ID_OFFSET (IRO[65].base)
#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \
(IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3a50 + \ (IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \
(function * 0x8))) IRO[50].m2))
#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \ #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IS_E1H_OFFSET ? (0x2588 + (function * 0x90)) : (0x3b60 + \ (IRO[48].base + ((pfId) * IRO[48].m1))
(function * 0x90))) #define XSTORM_SPQ_DATA_OFFSET(funcId) \
#define XSTORM_FUNCTION_MODE_OFFSET \ (IRO[32].base + ((funcId) * IRO[32].m1))
(IS_E1H_OFFSET ? 0x2c50 : 0xffffffff) #define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
#define XSTORM_HC_BTR_OFFSET(port) \ #define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
(IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) (IRO[30].base + ((funcId) * IRO[30].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(function) \ #define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IS_E1H_OFFSET ? (0x80c0 + (function * 0x8)) : (0x1c30 + \ (IRO[31].base + ((funcId) * IRO[31].m1))
(function * 0x8))) #define XSTORM_STATS_FLAGS_OFFSET(pfId) \
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(function) \ (IRO[43].base + ((pfId) * IRO[43].m1))
(IS_E1H_OFFSET ? (0x8080 + (function * 0x8)) : (0x1c20 + \ #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
(function * 0x8))) (IRO[206].base + ((portId) * IRO[206].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(function) \ #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
(IS_E1H_OFFSET ? (0x8081 + (function * 0x8)) : (0x1c21 + \ (IRO[207].base + ((portId) * IRO[207].m1))
(function * 0x8))) #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(function) \ (IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \
(IS_E1H_OFFSET ? (0x8082 + (function * 0x8)) : (0x1c22 + \ IRO[209].m2))
(function * 0x8))) #define XSTORM_VF_TO_PF_OFFSET(funcId) \
#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(function) \ (IRO[52].base + ((funcId) * IRO[52].m1))
(IS_E1H_OFFSET ? (0x8083 + (function * 0x8)) : (0x1c23 + \
(function * 0x8)))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(function) \
(IS_E1H_OFFSET ? (0x8084 + (function * 0x8)) : (0x1c24 + \
(function * 0x8)))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(function) \
(IS_E1H_OFFSET ? (0x8085 + (function * 0x8)) : (0x1c25 + \
(function * 0x8)))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(function) \
(IS_E1H_OFFSET ? (0x8086 + (function * 0x8)) : (0x1c26 + \
(function * 0x8)))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
(IS_E1H_OFFSET ? (0x8004 + (function * 0x8)) : (0x1c04 + \
(function * 0x8)))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
(IS_E1H_OFFSET ? (0x8002 + (function * 0x8)) : (0x1c02 + \
(function * 0x8)))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
(IS_E1H_OFFSET ? (0x8000 + (function * 0x8)) : (0x1c00 + \
(function * 0x8)))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
(IS_E1H_OFFSET ? (0x80c4 + (function * 0x8)) : (0x1c34 + \
(function * 0x8)))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(function) \
(IS_E1H_OFFSET ? (0x80c2 + (function * 0x8)) : (0x1c32 + \
(function * 0x8)))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(function) \
(IS_E1H_OFFSET ? (0x8043 + (function * 0x8)) : (0x1c13 + \
(function * 0x8)))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
(IS_E1H_OFFSET ? (0x8042 + (function * 0x8)) : (0x1c12 + \
(function * 0x8)))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(function) \
(IS_E1H_OFFSET ? (0x8041 + (function * 0x8)) : (0x1c11 + \
(function * 0x8)))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(function) \
(IS_E1H_OFFSET ? (0x8040 + (function * 0x8)) : (0x1c10 + \
(function * 0x8)))
#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
(IS_E1H_OFFSET ? (0xc000 + (port * 0x360) + (stats_counter_id * \
0x30)) : (0x3378 + (port * 0x360) + (stats_counter_id * 0x30)))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
(IS_E1H_OFFSET ? (0x2548 + (function * 0x90)) : (0x3b20 + \
(function * 0x90)))
#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
(IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
(function * 0x10)))
#define XSTORM_SPQ_PROD_OFFSET(function) \
(IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
(function * 0x10)))
#define XSTORM_STATS_FLAGS_OFFSET(function) \
(IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3a40 + \
(function * 0x8)))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port) \
(IS_E1H_OFFSET ? (0x4000 + (port * 0x8)) : (0x1960 + (port * 0x8)))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port) \
(IS_E1H_OFFSET ? (0x4001 + (port * 0x8)) : (0x1961 + (port * 0x8)))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(function) \
(IS_E1H_OFFSET ? (0x4060 + ((function>>1) * 0x8) + ((function&1) \
* 0x4)) : (0x1978 + (function * 0x4)))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
/**
* This file defines HSI constants for the ETH flow
*/
#ifdef _EVEREST_MICROCODE
#include "microcode_constants.h"
#include "eth_rx_bd.h"
#include "eth_tx_bd.h"
#include "eth_rx_cqe.h"
#include "eth_rx_sge.h"
#include "eth_rx_cqe_next_page.h"
#endif
/* RSS hash types */ /* RSS hash types */
#define DEFAULT_HASH_TYPE 0 #define DEFAULT_HASH_TYPE 0
#define IPV4_HASH_TYPE 1 #define IPV4_HASH_TYPE 1
@ -389,11 +292,17 @@
#define U_ETH_NUM_OF_SGES_TO_FETCH 8 #define U_ETH_NUM_OF_SGES_TO_FETCH 8
#define U_ETH_MAX_SGES_FOR_PACKET 3 #define U_ETH_MAX_SGES_FOR_PACKET 3
/*Tx params*/
#define X_ETH_NO_VLAN 0
#define X_ETH_OUTBAND_VLAN 1
#define X_ETH_INBAND_VLAN 2
/* Rx ring params */ /* Rx ring params */
#define U_ETH_LOCAL_BD_RING_SIZE 8 #define U_ETH_LOCAL_BD_RING_SIZE 8
#define U_ETH_LOCAL_SGE_RING_SIZE 10 #define U_ETH_LOCAL_SGE_RING_SIZE 10
#define U_ETH_SGL_SIZE 8 #define U_ETH_SGL_SIZE 8
/* The fw will padd the buffer with this value, so the IP header \
will be align to 4 Byte */
#define IP_HEADER_ALIGNMENT_PADDING 2
#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \ #define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
(0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1)) (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
@ -409,16 +318,15 @@
#define U_ETH_UNDEFINED_Q 0xFF #define U_ETH_UNDEFINED_Q 0xFF
/* values of command IDs in the ramrod message */ /* values of command IDs in the ramrod message */
#define RAMROD_CMD_ID_ETH_PORT_SETUP 80 #define RAMROD_CMD_ID_ETH_UNUSED 0
#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 85 #define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1
#define RAMROD_CMD_ID_ETH_STAT_QUERY 90 #define RAMROD_CMD_ID_ETH_UPDATE 2
#define RAMROD_CMD_ID_ETH_UPDATE 100 #define RAMROD_CMD_ID_ETH_HALT 3
#define RAMROD_CMD_ID_ETH_HALT 105 #define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4
#define RAMROD_CMD_ID_ETH_SET_MAC 110 #define RAMROD_CMD_ID_ETH_ACTIVATE 5
#define RAMROD_CMD_ID_ETH_CFC_DEL 115 #define RAMROD_CMD_ID_ETH_DEACTIVATE 6
#define RAMROD_CMD_ID_ETH_PORT_DEL 120 #define RAMROD_CMD_ID_ETH_EMPTY 7
#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 125 #define RAMROD_CMD_ID_ETH_TERMINATE 8
/* command values for set mac command */ /* command values for set mac command */
#define T_ETH_MAC_COMMAND_SET 0 #define T_ETH_MAC_COMMAND_SET 0
@ -431,7 +339,9 @@
/* Maximal L2 clients supported */ /* Maximal L2 clients supported */
#define ETH_MAX_RX_CLIENTS_E1 18 #define ETH_MAX_RX_CLIENTS_E1 18
#define ETH_MAX_RX_CLIENTS_E1H 26 #define ETH_MAX_RX_CLIENTS_E1H 28
#define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H
/* Maximal aggregation queues supported */ /* Maximal aggregation queues supported */
#define ETH_MAX_AGGREGATION_QUEUES_E1 32 #define ETH_MAX_AGGREGATION_QUEUES_E1 32
@ -443,6 +353,20 @@
#define ETH_RSS_MODE_VLAN_PRI 2 #define ETH_RSS_MODE_VLAN_PRI 2
#define ETH_RSS_MODE_E1HOV_PRI 3 #define ETH_RSS_MODE_E1HOV_PRI 3
#define ETH_RSS_MODE_IP_DSCP 4 #define ETH_RSS_MODE_IP_DSCP 4
#define ETH_RSS_MODE_E2_INTEG 5
/* ETH vlan filtering modes */
#define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */
#define ETH_VLAN_FILTER_SPECIFIC_VLAN \
1 /* Only the vlan_id is allowed */
#define ETH_VLAN_FILTER_CLASSIFY \
2 /* vlan will be added to CAM for classification */
/* Fast path CQE selection */
#define ETH_FP_CQE_REGULAR 0
#define ETH_FP_CQE_SGL 1
#define ETH_FP_CQE_RAW 2
/** /**
@ -458,6 +382,7 @@
#define RESERVED_CONNECTION_TYPE_0 5 #define RESERVED_CONNECTION_TYPE_0 5
#define RESERVED_CONNECTION_TYPE_1 6 #define RESERVED_CONNECTION_TYPE_1 6
#define RESERVED_CONNECTION_TYPE_2 7 #define RESERVED_CONNECTION_TYPE_2 7
#define NONE_CONNECTION_TYPE 8
#define PROTOCOL_STATE_BIT_OFFSET 6 #define PROTOCOL_STATE_BIT_OFFSET 6
@ -466,6 +391,16 @@
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) #define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) #define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
/* values of command IDs in the ramrod message */
#define RAMROD_CMD_ID_COMMON_FUNCTION_START 1
#define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2
#define RAMROD_CMD_ID_COMMON_CFC_DEL 3
#define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4
#define RAMROD_CMD_ID_COMMON_SET_MAC 5
#define RAMROD_CMD_ID_COMMON_STAT_QUERY 6
#define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7
#define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8
/* microcode fixed page page size 4K (chains and ring segments) */ /* microcode fixed page page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096 #define MC_PAGE_SIZE 4096
@ -473,46 +408,26 @@
/* Host coalescing constants */ /* Host coalescing constants */
#define HC_IGU_BC_MODE 0 #define HC_IGU_BC_MODE 0
#define HC_IGU_NBC_MODE 1 #define HC_IGU_NBC_MODE 1
/* Host coalescing constants. E1 includes E1H as well */
/* Number of indices per slow-path SB */
#define HC_SP_SB_MAX_INDICES 16
/* Number of indices per SB */
#define HC_SB_MAX_INDICES_E1X 8
#define HC_SB_MAX_INDICES_E2 8
#define HC_SB_MAX_SB_E1X 32
#define HC_SB_MAX_SB_E2 136
#define HC_SP_SB_ID 0xde
#define HC_REGULAR_SEGMENT 0 #define HC_REGULAR_SEGMENT 0
#define HC_DEFAULT_SEGMENT 1 #define HC_DEFAULT_SEGMENT 1
#define HC_SB_MAX_SM 2
/* index numbers */ #define HC_SB_MAX_DYNAMIC_INDICES 4
#define HC_USTORM_DEF_SB_NUM_INDICES 8 #define HC_FUNCTION_DISABLED 0xff
#define HC_CSTORM_DEF_SB_NUM_INDICES 8
#define HC_XSTORM_DEF_SB_NUM_INDICES 4
#define HC_TSTORM_DEF_SB_NUM_INDICES 4
#define HC_USTORM_SB_NUM_INDICES 4
#define HC_CSTORM_SB_NUM_INDICES 4
/* index values - which counter to update */
#define HC_INDEX_U_TOE_RX_CQ_CONS 0
#define HC_INDEX_U_ETH_RX_CQ_CONS 1
#define HC_INDEX_U_ETH_RX_BD_CONS 2
#define HC_INDEX_U_FCOE_EQ_CONS 3
#define HC_INDEX_C_TOE_TX_CQ_CONS 0
#define HC_INDEX_C_ETH_TX_CQ_CONS 1
#define HC_INDEX_C_ISCSI_EQ_CONS 2
#define HC_INDEX_DEF_X_SPQ_CONS 0
#define HC_INDEX_DEF_C_RDMA_EQ_CONS 0
#define HC_INDEX_DEF_C_RDMA_NAL_PROD 1
#define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2
#define HC_INDEX_DEF_C_ETH_SLOW_PATH 3
#define HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS 4
#define HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS 5
#define HC_INDEX_DEF_C_ETH_FCOE_CQ_CONS 6
#define HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS 0
#define HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS 1
#define HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS 2
#define HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS 3
#define HC_INDEX_DEF_U_ETH_FCOE_RX_CQ_CONS 4
#define HC_INDEX_DEF_U_ETH_FCOE_RX_BD_CONS 5
/* used by the driver to get the SB offset */ /* used by the driver to get the SB offset */
#define USTORM_ID 0 #define USTORM_ID 0
#define CSTORM_ID 1 #define CSTORM_ID 1
@ -529,45 +444,17 @@
/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
#define EMULATION_FREQUENCY_FACTOR 1600
#define FPGA_FREQUENCY_FACTOR 100
#define TIMERS_TICK_SIZE_CHIP (1e-3) #define TIMERS_TICK_SIZE_CHIP (1e-3)
#define TIMERS_TICK_SIZE_EMUL \
((TIMERS_TICK_SIZE_CHIP)/((EMULATION_FREQUENCY_FACTOR)))
#define TIMERS_TICK_SIZE_FPGA \
((TIMERS_TICK_SIZE_CHIP)/((FPGA_FREQUENCY_FACTOR)))
#define TSEMI_CLK1_RESUL_CHIP (1e-3) #define TSEMI_CLK1_RESUL_CHIP (1e-3)
#define TSEMI_CLK1_RESUL_EMUL \
((TSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
#define TSEMI_CLK1_RESUL_FPGA \
((TSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
#define USEMI_CLK1_RESUL_CHIP (TIMERS_TICK_SIZE_CHIP)
#define USEMI_CLK1_RESUL_EMUL (TIMERS_TICK_SIZE_EMUL)
#define USEMI_CLK1_RESUL_FPGA (TIMERS_TICK_SIZE_FPGA)
#define XSEMI_CLK1_RESUL_CHIP (1e-3) #define XSEMI_CLK1_RESUL_CHIP (1e-3)
#define XSEMI_CLK1_RESUL_EMUL \
((XSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
#define XSEMI_CLK1_RESUL_FPGA \
((XSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
#define XSEMI_CLK2_RESUL_CHIP (1e-6)
#define XSEMI_CLK2_RESUL_EMUL \
((XSEMI_CLK2_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
#define XSEMI_CLK2_RESUL_FPGA \
((XSEMI_CLK2_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6)) #define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
#define SDM_TIMER_TICK_RESUL_EMUL \
((SDM_TIMER_TICK_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
#define SDM_TIMER_TICK_RESUL_FPGA \
((SDM_TIMER_TICK_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
#define XSTORM_IP_ID_ROLL_HALF 0x8000 #define XSTORM_IP_ID_ROLL_HALF 0x8000
#define XSTORM_IP_ID_ROLL_ALL 0 #define XSTORM_IP_ID_ROLL_ALL 0
@ -576,10 +463,36 @@
#define NUM_OF_PROTOCOLS 4 #define NUM_OF_PROTOCOLS 4
#define NUM_OF_SAFC_BITS 16 #define NUM_OF_SAFC_BITS 16
#define MAX_COS_NUMBER 4 #define MAX_COS_NUMBER 4
#define MAX_T_STAT_COUNTER_ID 18
#define MAX_X_STAT_COUNTER_ID 18
#define MAX_U_STAT_COUNTER_ID 18
#define FAIRNESS_COS_WRR_MODE 0
#define FAIRNESS_COS_ETS_MODE 1
/* Priority Flow Control (PFC) */
#define MAX_PFC_PRIORITIES 8
#define MAX_PFC_TRAFFIC_TYPES 8
/* Available Traffic Types for Link Layer Flow Control */
#define LLFC_TRAFFIC_TYPE_NW 0
#define LLFC_TRAFFIC_TYPE_FCOE 1
#define LLFC_TRAFFIC_TYPE_ISCSI 2
/***************** START OF E2 INTEGRATION \
CODE***************************************/
#define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3
/***************** END OF E2 INTEGRATION \
CODE***************************************/
#define LLFC_TRAFFIC_TYPE_MAX 4
/* used by array traffic_type_to_priority[] to mark traffic type \
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
#define LLFC_MODE_NONE 0
#define LLFC_MODE_PFC 1
#define LLFC_MODE_SAFC 2
#define DCB_DISABLED 0
#define DCB_ENABLED 1
#define UNKNOWN_ADDRESS 0 #define UNKNOWN_ADDRESS 0
#define UNICAST_ADDRESS 1 #define UNICAST_ADDRESS 1
@ -587,8 +500,32 @@
#define BROADCAST_ADDRESS 3 #define BROADCAST_ADDRESS 3
#define SINGLE_FUNCTION 0 #define SINGLE_FUNCTION 0
#define MULTI_FUNCTION 1 #define MULTI_FUNCTION_SD 1
#define MULTI_FUNCTION_SI 2
#define IP_V4 0 #define IP_V4 0
#define IP_V6 1 #define IP_V6 1
#define C_ERES_PER_PAGE \
(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
#define EVENT_RING_OPCODE_VF_PF_CHANNEL 0
#define EVENT_RING_OPCODE_FUNCTION_START 1
#define EVENT_RING_OPCODE_FUNCTION_STOP 2
#define EVENT_RING_OPCODE_CFC_DEL 3
#define EVENT_RING_OPCODE_CFC_DEL_WB 4
#define EVENT_RING_OPCODE_SET_MAC 5
#define EVENT_RING_OPCODE_STAT_QUERY 6
#define EVENT_RING_OPCODE_STOP_TRAFFIC 7
#define EVENT_RING_OPCODE_START_TRAFFIC 8
#define EVENT_RING_OPCODE_FORWARD_SETUP 9
#define VF_PF_CHANNEL_STATE_READY 0
#define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1
#define VF_PF_CHANNEL_STATE_MAX_NUMBER 2
#endif /* BNX2X_FW_DEFS_H */

Просмотреть файл

@ -31,6 +31,7 @@ struct bnx2x_fw_file_hdr {
struct bnx2x_fw_file_section csem_pram_data; struct bnx2x_fw_file_section csem_pram_data;
struct bnx2x_fw_file_section xsem_int_table_data; struct bnx2x_fw_file_section xsem_int_table_data;
struct bnx2x_fw_file_section xsem_pram_data; struct bnx2x_fw_file_section xsem_pram_data;
struct bnx2x_fw_file_section iro_arr;
struct bnx2x_fw_file_section fw_version; struct bnx2x_fw_file_section fw_version;
}; };

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -148,5 +148,46 @@ union init_op {
struct raw_op raw; struct raw_op raw;
}; };
#define INITOP_SET 0 /* set the HW directly */
#define INITOP_CLEAR 1 /* clear the HW directly */
#define INITOP_INIT 2 /* set the init-value array */
/****************************************************************************
* ILT management
****************************************************************************/
/* One shadow ILT entry: a driver-allocated DMA page backing one HW ILT line. */
struct ilt_line {
	dma_addr_t page_mapping;	/* DMA address programmed into the HW ILT */
	void *page;			/* CPU virtual address of the backing page */
	u32 size;			/* allocation size in bytes */
};
/* Per-client (CDU/QM/SRC/TM) view of its slice of the ILT. */
struct ilt_client_info {
	u32 page_size;		/* backing page size in bytes for this client */
	u16 start;		/* first ILT line, relative to ilt->start_line */
	u16 end;		/* last ILT line, inclusive */
	u16 client_num;		/* one of the ILT_CLIENT_* ids */
	u16 flags;
#define ILT_CLIENT_SKIP_INIT	0x1	/* don't program HW for this client */
#define ILT_CLIENT_SKIP_MEM	0x2	/* don't alloc/free backing memory */
};
/* Shadow copy of the HW Internal Lookup Table (ILT) owned by this function. */
struct bnx2x_ilt {
	u32 start_line;			/* first HW ILT line of this function */
	struct ilt_line *lines;		/* per-line backing-page descriptors */
	struct ilt_client_info clients[4];	/* indexed by ILT_CLIENT_* */
#define ILT_CLIENT_CDU	0
#define ILT_CLIENT_QM	1
#define ILT_CLIENT_SRC	2
#define ILT_CLIENT_TM	3
};
/****************************************************************************
 * SRC configuration
 ****************************************************************************/
/* Searcher (SRC) T2 table entry: 64 bytes total, last 8 link the free list. */
struct src_ent {
	u8 opaque[56];	/* FW/HW-owned payload; the driver never touches it */
	u64 next;	/* physical address of the next entry in the chain */
};
#endif /* BNX2X_INIT_H */ #endif /* BNX2X_INIT_H */

Просмотреть файл

@ -151,6 +151,15 @@ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
bnx2x_init_ind_wr(bp, addr, data, len); bnx2x_init_ind_wr(bp, addr, data, len);
} }
/* Write a 64-bit value, given as two 32-bit halves, to a wide register
 * via a single 2-dword DMAE transaction.
 */
static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
{
	u32 data[2] = { val_lo, val_hi };

	REG_WR_DMAE_LEN(bp, reg, data, 2);
}
static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off) static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
{ {
const u8 *data = NULL; const u8 *data = NULL;
@ -503,4 +512,333 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
} }
} }
/****************************************************************************
* ILT management
****************************************************************************/
/*
* This codes hides the low level HW interaction for ILT management and
* configuration. The API consists of a shadow ILT table which is set by the
* driver and a set of routines to use it to configure the HW.
*
*/
/* ILT HW init operations */
/* ILT memory management operations */
#define ILT_MEMOP_ALLOC 0	/* allocate backing pages for ILT lines */
#define ILT_MEMOP_FREE 1	/* release backing pages of ILT lines */
/* the phys address is shifted right 12 bits and has an added
 * 1=valid bit added to the 53rd bit
 * then since this is a wide register(TM)
 * we split it into two 32 bit writes
 */
#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
/* pack a [first, last] ILT line pair into one E1 range-register value */
#define ILT_RANGE(f, l) (((l) << 10) | f)
/* Allocate or free (per @memop) the DMA page backing one shadow ILT line.
 * Returns 0 on success, -1 if the allocation fails.
 */
static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
				 u32 size, u8 memop)
{
	if (memop == ILT_MEMOP_FREE) {
		BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
		return 0;
	}
	/* ILT_MEMOP_ALLOC: zeroed DMA-coherent block of @size bytes */
	BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
	if (!line->page)
		return -1;
	line->size = size;
	return 0;
}
/* Apply @memop to every ILT line owned by client @cli_num.
 * Clients flagged SKIP_INIT or SKIP_MEM are silently skipped; the loop
 * stops at the first failing line.  Returns 0 on success, -1 on a
 * missing shadow table or on allocation failure.
 */
static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
{
	int i, rc;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (!ilt || !ilt->lines)
		return -1;

	if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
		return 0;

	for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
		rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
					   ilt_cli->page_size, memop);
	}
	return rc;
}
/* Apply @memop (alloc/free) to the ILT memory of every client.
 * Stops at the first client that fails and returns its error code;
 * returns 0 when all clients succeed.
 */
int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
	static const int clients[] = {
		ILT_CLIENT_CDU, ILT_CLIENT_QM, ILT_CLIENT_SRC, ILT_CLIENT_TM
	};
	int i, rc = 0;

	for (i = 0; !rc && i < 4; i++)
		rc = bnx2x_ilt_client_mem_op(bp, clients[i], memop);

	return rc;
}
/* Program one HW ILT line (@abs_idx) with a page's DMA address.
 * E1 and later chips expose the on-chip address table at different
 * register bases; each line is a 64-bit wide register (8 bytes apart).
 */
static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
			      dma_addr_t page_mapping)
{
	u32 reg;

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;

	/* ILT_ADDR1/2 split the (shifted, valid-bit-tagged) address in two */
	bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
}
/* Set (INIT/SET) or clear one ILT line in HW.  @idx is relative to the
 * function's first line (ilt->start_line); CLEAR writes a NULL mapping.
 */
static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
				   int idx, u8 initop)
{
	dma_addr_t null_mapping;
	int abs_idx = ilt->start_line + idx;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
		break;
	case INITOP_CLEAR:
		null_mapping = 0;
		bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
		break;
	}
}
/* Program the per-client ILT boundary registers — the [first, last] line
 * range the HW associates with the client.  E1 packs the range into one
 * per-function register (ILT_RANGE); later chips use separate first/last
 * registers.  For now CLEAR behaves the same as SET/INIT.
 */
void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
			       struct ilt_client_info *ilt_cli,
			       u32 ilt_start, u8 initop)
{
	u32 start_reg = 0;
	u32 end_reg = 0;

	/* The boundary is either SET or INIT,
	   CLEAR => SET and for now SET ~~ INIT */

	/* find the appropriate regs */
	if (CHIP_IS_E1(bp)) {
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_PSWRQ_QM0_L2P;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_PSWRQ_TM0_L2P;
			break;
		}
		/* E1: one register per function, range packed by ILT_RANGE */
		REG_WR(bp, start_reg + BP_FUNC(bp)*4,
		       ILT_RANGE((ilt_start + ilt_cli->start),
				 (ilt_start + ilt_cli->end)));
	} else {
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
			end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_QM_LAST_ILT;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
			end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_TM_LAST_ILT;
			break;
		}
		/* E1H+: separate first/last line registers */
		REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
		REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
	}
}
/* Program every ILT line of one client into HW, then its boundary
 * registers.  No-op for clients flagged ILT_CLIENT_SKIP_INIT.
 */
void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
				  struct ilt_client_info *ilt_cli, u8 initop)
{
	int i;

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	for (i = ilt_cli->start; i <= ilt_cli->end; i++)
		bnx2x_ilt_line_init_op(bp, ilt, i, initop);

	/* init/clear the ILT boundries */
	bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
}
/* Convenience wrapper: run the client init op against the device's own
 * shadow ILT table.
 */
void bnx2x_ilt_client_init_op(struct bnx2x *bp,
			      struct ilt_client_info *ilt_cli, u8 initop)
{
	bnx2x_ilt_client_init_op_ilt(bp, BP_ILT(bp), ilt_cli, initop);
}
/* Look up the client descriptor by its ILT_CLIENT_* index and run the
 * generic per-client init op on it.
 */
static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
					int cli_num, u8 initop)
{
	bnx2x_ilt_client_init_op(bp, &BP_ILT(bp)->clients[cli_num], initop);
}
/* Apply @initop to all four ILT clients (CDU, QM, SRC, TM) in order. */
void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
	int cli;

	for (cli = ILT_CLIENT_CDU; cli <= ILT_CLIENT_TM; cli++)
		bnx2x_ilt_client_id_init_op(bp, cli, initop);
}
/* Program a client's ILT page-size register (@psz_reg).  HW expects
 * log2 of the page size in 4KB units.  CLEAR leaves the register
 * untouched; SKIP_INIT clients are ignored.
 */
static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
				      u32 psz_reg, u8 initop)
{
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		/* page_size is in bytes; >> 12 converts to 4KB units */
		REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
		break;
	case INITOP_CLEAR:
		break;
	}
}
/*
 * Called during the init common stage; ILT clients must be initialized
 * prior to calling this function.  Programs the page-size register of
 * each ILT client (CDU, QM, SRC, TM).
 */
void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
{
	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
				  PXP2_REG_RQ_CDU_P_SIZE, initop);
	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
				  PXP2_REG_RQ_QM_P_SIZE, initop);
	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
				  PXP2_REG_RQ_SRC_P_SIZE, initop);
	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
				  PXP2_REG_RQ_TM_P_SIZE, initop);
}
/****************************************************************************
* QM initializations
****************************************************************************/
#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */
#define QM_INIT_MIN_CID_COUNT 31
/* QM setup is only performed when the CID count is large enough */
#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
/* called during init port stage */
/* Program the per-port QM connection-count register; HW stores the count
 * as (cids/16 - 1).  No-op when the CID count is below the QM_INIT
 * threshold or on INITOP_CLEAR.
 */
void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
			     u8 initop)
{
	int port = BP_PORT(bp);

	if (QM_INIT(qm_cid_count)) {
		switch (initop) {
		case INITOP_INIT:
			/* set in the init-value array */
		case INITOP_SET:
			REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
			       qm_cid_count/16 - 1);
			break;
		case INITOP_CLEAR:
			break;
		}
	}
}
/* Fill the QM base-address and pointer tables for all 4 * 16 queues:
 * queue bases are spaced qm_cid_count*4 apart (wrapping every
 * QM_QUEUES_PER_FUNC queues) and each pointer-table entry is zeroed.
 * On E1H the extended (EXT_A) copies are programmed identically.
 */
static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
{
	int i;
	u32 wb_data[2];

	wb_data[0] = wb_data[1] = 0;

	for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4,
		       qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
				  wb_data, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
			       qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_data, 2);
		}
	}
}
/* called during init common stage */
/* Set up the QM pointer tables on INIT/SET; CLEAR and CID counts below
 * the QM_INIT threshold are no-ops.
 */
void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
			     u8 initop)
{
	if (!QM_INIT(qm_cid_count))
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
	case INITOP_SET:
		bnx2x_qm_set_ptr_table(bp, qm_cid_count);
		break;
	case INITOP_CLEAR:
		break;
	}
}
/****************************************************************************
* SRC initializations
****************************************************************************/
/* called during init func stage */
/* Chain the searcher T2 table into a free list (each entry's 'next'
 * holds the physical address of the following entry) and program the
 * searcher's per-port free count and first/last free-entry pointers.
 * NOTE(review): the last entry's 'next' is not written here —
 * presumably the table was zero-allocated by the caller; confirm.
 */
void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
		       dma_addr_t t2_mapping, int src_cid_count)
{
	int i;
	int port = BP_PORT(bp);

	/* Initialize T2 */
	for (i = 0; i < src_cid_count-1; i++)
		t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent));

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);

	bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(t2_mapping), U64_HI(t2_mapping));

	bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)t2_mapping +
			   (src_cid_count-1) * sizeof(struct src_ent)),
		    U64_HI((u64)t2_mapping +
			   (src_cid_count-1) * sizeof(struct src_ent)));
}
#endif /* BNX2X_INIT_OPS_H */ #endif /* BNX2X_INIT_OPS_H */

Просмотреть файл

@ -28,7 +28,7 @@
/********************************************************/ /********************************************************/
#define ETH_HLEN 14 #define ETH_HLEN 14
#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/ #define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
#define ETH_MIN_PACKET_SIZE 60 #define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500 #define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600 #define ETH_MAX_JUMBO_PACKET_SIZE 9600
@ -4066,6 +4066,7 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
"verification\n"); "verification\n");
return -EINVAL; return -EINVAL;
} }
fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl); fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param); fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) { if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -153,7 +153,7 @@ static inline long bnx2x_hilo(u32 *hiref)
static void bnx2x_storm_stats_post(struct bnx2x *bp) static void bnx2x_storm_stats_post(struct bnx2x *bp)
{ {
if (!bp->stats_pending) { if (!bp->stats_pending) {
struct eth_query_ramrod_data ramrod_data = {0}; struct common_query_ramrod_data ramrod_data = {0};
int i, rc; int i, rc;
spin_lock_bh(&bp->stats_lock); spin_lock_bh(&bp->stats_lock);
@ -163,9 +163,9 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
for_each_queue(bp, i) for_each_queue(bp, i)
ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
((u32 *)&ramrod_data)[1], ((u32 *)&ramrod_data)[1],
((u32 *)&ramrod_data)[0], 0); ((u32 *)&ramrod_data)[0], 1);
if (rc == 0) { if (rc == 0) {
/* stats ramrod has it's own slot on the spq */ /* stats ramrod has it's own slot on the spq */
bp->spq_left++; bp->spq_left++;
@ -398,9 +398,9 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
BIGMAC_REGISTER_RX_STAT_GR64) >> 2; BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
dmae->src_addr_hi = 0; dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
offsetof(struct bmac_stats, rx_stat_gr64_lo)); offsetof(struct bmac1_stats, rx_stat_gr64_lo));
dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
offsetof(struct bmac_stats, rx_stat_gr64_lo)); offsetof(struct bmac1_stats, rx_stat_gr64_lo));
dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
BIGMAC_REGISTER_RX_STAT_GR64) >> 2; BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
@ -571,7 +571,7 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
static void bnx2x_bmac_stats_update(struct bnx2x *bp) static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{ {
struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats); struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
struct bnx2x_eth_stats *estats = &bp->eth_stats; struct bnx2x_eth_stats *estats = &bp->eth_stats;
struct { struct {

Просмотреть файл

@ -942,7 +942,7 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK; PAGE_MASK;
uinfo->mem[1].size = sizeof(struct host_def_status_block); uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
uinfo->name = "bnx2x_cnic"; uinfo->name = "bnx2x_cnic";
} }
@ -1063,6 +1063,8 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
int i, j, n, ret, pages; int i, j, n, ret, pages;
struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
cp->iro_arr = ethdev->iro_arr;
cp->max_cid_space = MAX_ISCSI_TBL_SZ; cp->max_cid_space = MAX_ISCSI_TBL_SZ;
cp->iscsi_start_cid = start_cid; cp->iscsi_start_cid = start_cid;
if (start_cid < BNX2X_ISCSI_START_CID) { if (start_cid < BNX2X_ISCSI_START_CID) {
@ -1127,8 +1129,6 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x));
cp->l2_rx_ring_size = 15; cp->l2_rx_ring_size = 15;
ret = cnic_alloc_l2_rings(dev, 4); ret = cnic_alloc_l2_rings(dev, 4);
@ -1211,7 +1211,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
BNX2X_HW_CID(cp, cid))); BNX2X_HW_CID(cp, cid)));
kwqe.hdr.type = cpu_to_le16(type); kwqe.hdr.type = cpu_to_le16(type);
kwqe.hdr.reserved = 0; kwqe.hdr.reserved1 = 0;
kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
@ -1527,8 +1527,10 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
ictx->tstorm_st_context.tcp.cwnd = 0x5A8; ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
ictx->tstorm_st_context.tcp.flags2 |= ictx->tstorm_st_context.tcp.flags2 |=
TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
ictx->tstorm_st_context.tcp.ooo_support_mode =
TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
ictx->ustorm_st_context.ring.rq.pbl_base.lo = ictx->ustorm_st_context.ring.rq.pbl_base.lo =
req2->rq_page_table_addr_lo; req2->rq_page_table_addr_lo;
@ -1717,6 +1719,7 @@ static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
int ret = 0; int ret = 0;
struct iscsi_kcqe kcqe; struct iscsi_kcqe kcqe;
struct kcqe *cqes[1]; struct kcqe *cqes[1];
u32 hw_cid, type;
if (!(ctx->ctx_flags & CTX_FL_OFFLD_START)) if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
goto skip_cfc_delete; goto skip_cfc_delete;
@ -1727,11 +1730,15 @@ static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
init_waitqueue_head(&ctx->waitq); init_waitqueue_head(&ctx->waitq);
ctx->wait_cond = 0; ctx->wait_cond = 0;
memset(&l5_data, 0, sizeof(l5_data)); memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, hw_cid = BNX2X_HW_CID(cp, ctx->cid);
req->context_id, type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
ETH_CONNECTION_TYPE | & SPE_HDR_CONN_TYPE;
(1 << SPE_HDR_COMMON_RAMROD_SHIFT), type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
&l5_data); SPE_HDR_FUNCTION_ID);
ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
hw_cid, type, &l5_data);
if (ret == 0) if (ret == 0)
wait_event(ctx->waitq, ctx->wait_cond); wait_event(ctx->waitq, ctx->wait_cond);
@ -2322,7 +2329,7 @@ static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{ {
struct cnic_local *cp = dev->cnic_priv; struct cnic_local *cp = dev->cnic_priv;
cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0, cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
IGU_INT_DISABLE, 0); IGU_INT_DISABLE, 0);
} }
@ -2357,7 +2364,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
status_idx, IGU_INT_ENABLE, 1); status_idx, IGU_INT_ENABLE, 1);
} }
@ -3285,6 +3292,7 @@ static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{ {
struct cnic_local *cp = dev->cnic_priv; struct cnic_local *cp = dev->cnic_priv;
u32 pfid = cp->pfid; u32 pfid = cp->pfid;
u32 port = CNIC_PORT(cp);
cnic_init_bnx2x_mac(dev); cnic_init_bnx2x_mac(dev);
cnic_bnx2x_set_tcp_timestamp(dev, 1); cnic_bnx2x_set_tcp_timestamp(dev, 1);
@ -3293,9 +3301,9 @@ static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
CNIC_WR(dev, BAR_XSTRORM_INTMEM + CNIC_WR(dev, BAR_XSTRORM_INTMEM +
XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(pfid), 1); XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
CNIC_WR(dev, BAR_XSTRORM_INTMEM + CNIC_WR(dev, BAR_XSTRORM_INTMEM +
XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(pfid), XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
DEF_MAX_DA_COUNT); DEF_MAX_DA_COUNT);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM + CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
@ -3859,32 +3867,48 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
return err; return err;
} }
static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
u16 sb_id, u8 sb_index,
u8 disable)
{
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
offsetof(struct hc_status_block_data_e1x, index_data) +
sizeof(struct hc_index_data)*sb_index +
offsetof(struct hc_index_data, flags);
u16 flags = CNIC_RD16(dev, addr);
/* clear and set */
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
HC_INDEX_DATA_HC_ENABLED);
CNIC_WR16(dev, addr, flags);
}
static void cnic_enable_bnx2x_int(struct cnic_dev *dev) static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{ {
struct cnic_local *cp = dev->cnic_priv; struct cnic_local *cp = dev->cnic_priv;
u8 sb_id = cp->status_blk_num; u8 sb_id = cp->status_blk_num;
int port = CNIC_PORT(cp);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM + CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
HC_INDEX_C_ISCSI_EQ_CONS), offsetof(struct hc_status_block_data_e1x, index_data) +
64 / 12); sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + offsetof(struct hc_index_data, timeout), 64 / 12);
CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
HC_INDEX_C_ISCSI_EQ_CONS), 0);
} }
static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{ {
} }
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev) static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{ {
struct cnic_local *cp = dev->cnic_priv; struct cnic_local *cp = dev->cnic_priv;
union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring; union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
struct eth_context *context; dma_addr_t buf_map, ring_map = cp->l2_ring_map;
struct regpair context_addr; struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
dma_addr_t buf_map;
int port = CNIC_PORT(cp); int port = CNIC_PORT(cp);
int i; int i;
int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
@ -3909,33 +3933,23 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
} }
context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);
val = (u64) cp->l2_ring_map >> 32; val = (u64) ring_map >> 32;
txbd->next_bd.addr_hi = cpu_to_le32(val); txbd->next_bd.addr_hi = cpu_to_le32(val);
context->xstorm_st_context.tx_bd_page_base_hi = val; data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
val = (u64) cp->l2_ring_map & 0xffffffff; val = (u64) ring_map & 0xffffffff;
txbd->next_bd.addr_lo = cpu_to_le32(val); txbd->next_bd.addr_lo = cpu_to_le32(val);
context->xstorm_st_context.tx_bd_page_base_lo = val; data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
context->cstorm_st_context.sb_index_number = /* Other ramrod params */
HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS; data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID; data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
if (cli < MAX_X_STAT_COUNTER_ID)
context->xstorm_st_context.statistics_data = cli |
XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE;
context->xstorm_ag_context.cdu_reserved =
CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(cp, BNX2X_ISCSI_L2_CID),
CDU_REGION_NUMBER_XCM_AG,
ETH_CONNECTION_TYPE);
/* reset xstorm per client statistics */ /* reset xstorm per client statistics */
if (cli < MAX_X_STAT_COUNTER_ID) { if (cli < MAX_STAT_COUNTER_ID) {
val = BAR_XSTRORM_INTMEM + val = BAR_XSTRORM_INTMEM +
XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
@ -3943,24 +3957,31 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
} }
cp->tx_cons_ptr = cp->tx_cons_ptr =
&cp->bnx2x_def_status_blk->c_def_status_block.index_values[ &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
} }
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev) static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{ {
struct cnic_local *cp = dev->cnic_priv; struct cnic_local *cp = dev->cnic_priv;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring + struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
BCM_PAGE_SIZE); BCM_PAGE_SIZE);
struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
(cp->l2_ring + (2 * BCM_PAGE_SIZE)); (cp->l2_ring + (2 * BCM_PAGE_SIZE));
struct eth_context *context; struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
struct regpair context_addr;
int i; int i;
int port = CNIC_PORT(cp); int port = CNIC_PORT(cp);
int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
u32 val; u32 val;
struct tstorm_eth_client_config tstorm_client = {0}; dma_addr_t ring_map = cp->l2_ring_map;
/* General data */
data->general.client_id = cli;
data->general.statistics_en_flg = 1;
data->general.statistics_counter_id = cli;
data->general.activate_flg = 1;
data->general.sp_client_id = cli;
for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map; dma_addr_t buf_map;
@ -3970,83 +3991,42 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
} }
context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);
val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
rxbd->addr_hi = cpu_to_le32(val); rxbd->addr_hi = cpu_to_le32(val);
data->rx.bd_page_base.hi = cpu_to_le32(val);
context->ustorm_st_context.common.bd_page_base_hi = val; val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
rxbd->addr_lo = cpu_to_le32(val); rxbd->addr_lo = cpu_to_le32(val);
data->rx.bd_page_base.lo = cpu_to_le32(val);
context->ustorm_st_context.common.bd_page_base_lo = val;
context->ustorm_st_context.common.sb_index_numbers =
BNX2X_ISCSI_RX_SB_INDEX_NUM;
context->ustorm_st_context.common.clientId = cli;
context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
if (cli < MAX_U_STAT_COUNTER_ID) {
context->ustorm_st_context.common.flags =
USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
context->ustorm_st_context.common.statistics_counter_id = cli;
}
context->ustorm_st_context.common.mc_alignment_log_size = 0;
context->ustorm_st_context.common.bd_buff_size =
cp->l2_single_buf_size;
context->ustorm_ag_context.cdu_usage =
CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(cp, BNX2X_ISCSI_L2_CID),
CDU_REGION_NUMBER_UCM_AG,
ETH_CONNECTION_TYPE);
rxcqe += BNX2X_MAX_RCQ_DESC_CNT; rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32; val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
rxcqe->addr_hi = cpu_to_le32(val); rxcqe->addr_hi = cpu_to_le32(val);
data->rx.cqe_page_base.hi = cpu_to_le32(val);
CNIC_WR(dev, BAR_USTRORM_INTMEM + val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);
val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
rxcqe->addr_lo = cpu_to_le32(val); rxcqe->addr_lo = cpu_to_le32(val);
data->rx.cqe_page_base.lo = cpu_to_le32(val);
CNIC_WR(dev, BAR_USTRORM_INTMEM + /* Other ramrod params */
USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val); data->rx.client_qzone_id = cl_qzone_id;
data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
data->rx.status_block_id = BNX2X_DEF_SB_ID;
CNIC_WR(dev, BAR_USTRORM_INTMEM + data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val); data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
/* client tstorm info */ data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
tstorm_client.mtu = cp->l2_single_buf_size - 14; data->rx.outer_vlan_removal_enable_flg = 1;
tstorm_client.config_flags = TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE;
if (cli < MAX_T_STAT_COUNTER_ID) {
tstorm_client.config_flags |=
TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
tstorm_client.statistics_counter_id = cli;
}
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_CLIENT_CONFIG_OFFSET(port, cli),
((u32 *)&tstorm_client)[0]);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
((u32 *)&tstorm_client)[1]);
/* reset tstorm per client statistics */
if (cli < MAX_T_STAT_COUNTER_ID) {
/* reset tstorm and ustorm per client statistics */
if (cli < MAX_STAT_COUNTER_ID) {
val = BAR_TSTRORM_INTMEM + val = BAR_TSTRORM_INTMEM +
TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
CNIC_WR(dev, val + i * 4, 0); CNIC_WR(dev, val + i * 4, 0);
}
/* reset ustorm per client statistics */
if (cli < MAX_U_STAT_COUNTER_ID) {
val = BAR_USTRORM_INTMEM + val = BAR_USTRORM_INTMEM +
USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++) for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
@ -4054,8 +4034,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
} }
cp->rx_cons_ptr = cp->rx_cons_ptr =
&cp->bnx2x_def_status_blk->u_def_status_block.index_values[ &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
} }
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
@ -4066,7 +4045,7 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
dev->max_iscsi_conn = 0; dev->max_iscsi_conn = 0;
base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR); base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
if (base < 0xa0000 || base >= 0xc0000) if (base == 0)
return; return;
addr = BNX2X_SHMEM_ADDR(base, addr = BNX2X_SHMEM_ADDR(base,
@ -4103,14 +4082,19 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
} }
if (BNX2X_CHIP_IS_E1H(cp->chip_id)) { if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
int func = CNIC_FUNC(cp); int func = CNIC_FUNC(cp);
u32 mf_cfg_addr;
mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
addr = mf_cfg_addr +
offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag);
addr = BNX2X_SHMEM_ADDR(base,
mf_cfg.func_mf_config[func].e1hov_tag);
val = CNIC_RD(dev, addr); val = CNIC_RD(dev, addr);
val &= FUNC_MF_CFG_E1HOV_TAG_MASK; val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
addr = BNX2X_SHMEM_ADDR(base, addr = mf_cfg_addr +
mf_cfg.func_mf_config[func].config); offsetof(struct mf_cfg,
func_mf_config[func].config);
val = CNIC_RD(dev, addr); val = CNIC_RD(dev, addr);
val &= FUNC_MF_CFG_PROTOCOL_MASK; val &= FUNC_MF_CFG_PROTOCOL_MASK;
if (val != FUNC_MF_CFG_PROTOCOL_ISCSI) if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
@ -4122,11 +4106,10 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
static int cnic_start_bnx2x_hw(struct cnic_dev *dev) static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{ {
struct cnic_local *cp = dev->cnic_priv; struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int func = CNIC_FUNC(cp), ret, i; int func = CNIC_FUNC(cp), ret, i;
int port = CNIC_PORT(cp);
u32 pfid; u32 pfid;
u16 eq_idx; struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
u8 sb_id = cp->status_blk_num;
cp->pfid = func; cp->pfid = func;
pfid = cp->pfid; pfid = cp->pfid;
@ -4137,15 +4120,16 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
if (ret) if (ret)
return -ENOMEM; return -ENOMEM;
cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
cp->kcq1.sw_prod_idx = 0; cp->kcq1.sw_prod_idx = 0;
cp->kcq1.hw_prod_idx_ptr = cp->kcq1.hw_prod_idx_ptr =
&cp->status_blk.bnx2x->c_status_block.index_values[ &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
HC_INDEX_C_ISCSI_EQ_CONS];
cp->kcq1.status_idx_ptr = cp->kcq1.status_idx_ptr =
&cp->status_blk.bnx2x->c_status_block.status_block_index; &sb->sb.running_index[SM_RX_ID];
cnic_get_bnx2x_iscsi_info(dev); cnic_get_bnx2x_iscsi_info(dev);
@ -4171,7 +4155,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM + CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
HC_INDEX_C_ISCSI_EQ_CONS); HC_INDEX_ISCSI_EQ_CONS);
for (i = 0; i < cp->conn_buf_info.num_pages; i++) { for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
CNIC_WR(dev, BAR_TSTRORM_INTMEM + CNIC_WR(dev, BAR_TSTRORM_INTMEM +
@ -4189,16 +4173,11 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4, USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
CNIC_WR(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
cnic_setup_bnx2x_context(dev); cnic_setup_bnx2x_context(dev);
eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
offsetof(struct cstorm_status_block_c,
index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
if (eq_idx != 0) {
netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx);
return -EBUSY;
}
ret = cnic_init_bnx2x_irq(dev); ret = cnic_init_bnx2x_irq(dev);
if (ret) if (ret)
return ret; return ret;
@ -4218,8 +4197,9 @@ static void cnic_init_rings(struct cnic_dev *dev)
cnic_init_bnx2_rx_ring(dev); cnic_init_bnx2_rx_ring(dev);
set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
struct cnic_local *cp = dev->cnic_priv;
u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
u32 cl_qzone_id, type;
struct client_init_ramrod_data *data;
union l5cm_specific_data l5_data; union l5cm_specific_data l5_data;
struct ustorm_eth_rx_producers rx_prods = {0}; struct ustorm_eth_rx_producers rx_prods = {0};
u32 off, i; u32 off, i;
@ -4228,23 +4208,36 @@ static void cnic_init_rings(struct cnic_dev *dev)
rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
barrier(); barrier();
cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
off = BAR_USTRORM_INTMEM + off = BAR_USTRORM_INTMEM +
USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli); USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli);
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
cnic_init_bnx2x_tx_ring(dev); data = cp->l2_buf;
cnic_init_bnx2x_rx_ring(dev);
memset(data, 0, sizeof(*data));
cnic_init_bnx2x_tx_ring(dev, data);
cnic_init_bnx2x_rx_ring(dev, data);
l5_data.phy_address.lo = cp->l2_buf_map & 0xffffffff;
l5_data.phy_address.hi = (u64) cp->l2_buf_map >> 32;
type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
& SPE_HDR_CONN_TYPE;
type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID);
set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
l5_data.phy_address.lo = cli;
l5_data.phy_address.hi = 0;
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); BNX2X_ISCSI_L2_CID, type, &l5_data);
i = 0; i = 0;
while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
++i < 10) ++i < 10)
@ -4272,6 +4265,7 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
union l5cm_specific_data l5_data; union l5cm_specific_data l5_data;
int i; int i;
u32 type;
cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
@ -4292,9 +4286,12 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
cnic_kwq_completion(dev, 1); cnic_kwq_completion(dev, 1);
memset(&l5_data, 0, sizeof(l5_data)); memset(&l5_data, 0, sizeof(l5_data));
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE | & SPE_HDR_CONN_TYPE;
(1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data); type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID);
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
BNX2X_ISCSI_L2_CID, type, &l5_data);
msleep(10); msleep(10);
} }
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
@ -4392,15 +4389,9 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{ {
struct cnic_local *cp = dev->cnic_priv; struct cnic_local *cp = dev->cnic_priv;
u8 sb_id = cp->status_blk_num;
int port = CNIC_PORT(cp);
cnic_free_irq(dev); cnic_free_irq(dev);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + *cp->kcq1.hw_prod_idx_ptr = 0;
CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
offsetof(struct cstorm_status_block_c,
index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
0);
CNIC_WR(dev, BAR_CSTRORM_INTMEM + CNIC_WR(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
CNIC_WR16(dev, cp->kcq1.io_addr, 0); CNIC_WR16(dev, cp->kcq1.io_addr, 0);

Просмотреть файл

@ -12,6 +12,13 @@
#ifndef CNIC_H #ifndef CNIC_H
#define CNIC_H #define CNIC_H
#define HC_INDEX_ISCSI_EQ_CONS 6
#define HC_INDEX_FCOE_EQ_CONS 3
#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
#define KWQ_PAGE_CNT 4 #define KWQ_PAGE_CNT 4
#define KCQ_PAGE_CNT 16 #define KCQ_PAGE_CNT 16
@ -179,6 +186,14 @@ struct kcq_info {
u32 io_addr; u32 io_addr;
}; };
struct iro {
u32 base;
u16 m1;
u16 m2;
u16 m3;
u16 size;
};
struct cnic_local { struct cnic_local {
spinlock_t cnic_ulp_lock; spinlock_t cnic_ulp_lock;
@ -213,6 +228,9 @@ struct cnic_local {
u16 rx_cons; u16 rx_cons;
u16 tx_cons; u16 tx_cons;
struct iro *iro_arr;
#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
struct cnic_dma kwq_info; struct cnic_dma kwq_info;
struct kwqe **kwq; struct kwqe **kwq;
@ -231,12 +249,16 @@ struct cnic_local {
union { union {
void *gen; void *gen;
struct status_block_msix *bnx2; struct status_block_msix *bnx2;
struct host_status_block *bnx2x; struct host_hc_status_block_e1x *bnx2x_e1x;
/* index values - which counter to update */
#define SM_RX_ID 0
#define SM_TX_ID 1
} status_blk; } status_blk;
struct host_def_status_block *bnx2x_def_status_blk; struct host_sp_status_block *bnx2x_def_status_blk;
u32 status_blk_num; u32 status_blk_num;
u32 bnx2x_igu_sb_id;
u32 int_num; u32 int_num;
u32 last_status_idx; u32 last_status_idx;
struct tasklet_struct cnic_irq_task; struct tasklet_struct cnic_irq_task;
@ -358,24 +380,33 @@ struct bnx2x_bd_chain_next {
(BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \ (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
((x) + 2) : ((x) + 1) ((x) + 2) : ((x) + 1)
#define BNX2X_DEF_SB_ID 16 #define BNX2X_DEF_SB_ID HC_SP_SB_ID
#define BNX2X_ISCSI_RX_SB_INDEX_NUM \ #define BNX2X_SHMEM_MF_BLK_OFFSET 0x7e4
((HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS << \
USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER)
#define BNX2X_SHMEM_ADDR(base, field) (base + \ #define BNX2X_SHMEM_ADDR(base, field) (base + \
offsetof(struct shmem_region, field)) offsetof(struct shmem_region, field))
#define CNIC_PORT(cp) ((cp)->func % PORT_MAX) #define BNX2X_SHMEM2_ADDR(base, field) (base + \
#define CNIC_FUNC(cp) ((cp)->func) offsetof(struct shmem2_region, field))
#define CNIC_E1HVN(cp) ((cp)->func >> 1)
#define BNX2X_HW_CID(cp, x) (((CNIC_FUNC(cp) % PORT_MAX) << 23) | \ #define BNX2X_SHMEM2_HAS(base, field) \
((base) && \
(CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \
offsetof(struct shmem2_region, field)))
#define CNIC_PORT(cp) ((cp)->pfid & 1)
#define CNIC_FUNC(cp) ((cp)->func)
#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
(CNIC_E1HVN(cp) << 17) | (x)) (CNIC_E1HVN(cp) << 17) | (x))
#define BNX2X_SW_CID(x) (x & 0x1ffff) #define BNX2X_SW_CID(x) (x & 0x1ffff)
#define BNX2X_CL_QZONE_ID(cp, cli) \
(cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK (0<<4)
#endif #endif

Просмотреть файл

@ -14,6 +14,7 @@
/* KWQ (kernel work queue) request op codes */ /* KWQ (kernel work queue) request op codes */
#define L2_KWQE_OPCODE_VALUE_FLUSH (4) #define L2_KWQE_OPCODE_VALUE_FLUSH (4)
#define L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE (8)
#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50) #define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51) #define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
@ -48,11 +49,14 @@
#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14) #define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
/* KCQ (kernel completion queue) completion status */ /* KCQ (kernel completion queue) completion status */
#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) #define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) #define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83) #define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89) #define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89)
#define L4_KCQE_OPCODE_VALUE_OOO_EVENT_NOTIFICATION (0xa0)
#define L4_KCQE_OPCODE_VALUE_OOO_FLUSH (0xa1)
#define L4_LAYER_CODE (4) #define L4_LAYER_CODE (4)
#define L2_LAYER_CODE (2) #define L2_LAYER_CODE (2)
@ -584,6 +588,100 @@ struct l4_kwq_upload {
* bnx2x structures * bnx2x structures
*/ */
/*
* The iscsi aggregative context of Cstorm
*/
struct cstorm_iscsi_ag_context {
u32 agg_vars1;
#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
#if defined(__BIG_ENDIAN)
u8 __aux1_th;
u8 __aux1_val;
u16 __agg_vars2;
#elif defined(__LITTLE_ENDIAN)
u16 __agg_vars2;
u8 __aux1_val;
u8 __aux1_th;
#endif
u32 rel_seq;
u32 rel_seq_th;
#if defined(__BIG_ENDIAN)
u16 hq_cons;
u16 hq_prod;
#elif defined(__LITTLE_ENDIAN)
u16 hq_prod;
u16 hq_cons;
#endif
#if defined(__BIG_ENDIAN)
u8 __reserved62;
u8 __reserved61;
u8 __reserved60;
u8 __reserved59;
#elif defined(__LITTLE_ENDIAN)
u8 __reserved59;
u8 __reserved60;
u8 __reserved61;
u8 __reserved62;
#endif
#if defined(__BIG_ENDIAN)
u16 __reserved64;
u16 __cq_u_prod0;
#elif defined(__LITTLE_ENDIAN)
u16 __cq_u_prod0;
u16 __reserved64;
#endif
u32 __cq_u_prod1;
#if defined(__BIG_ENDIAN)
u16 __agg_vars3;
u16 __cq_u_prod2;
#elif defined(__LITTLE_ENDIAN)
u16 __cq_u_prod2;
u16 __agg_vars3;
#endif
#if defined(__BIG_ENDIAN)
u16 __aux2_th;
u16 __cq_u_prod3;
#elif defined(__LITTLE_ENDIAN)
u16 __cq_u_prod3;
u16 __aux2_th;
#endif
};
/* /*
* iSCSI context region, used only in iSCSI * iSCSI context region, used only in iSCSI
*/ */
@ -696,7 +794,7 @@ struct ustorm_iscsi_st_context {
struct regpair task_pbl_base; struct regpair task_pbl_base;
struct regpair tce_phy_addr; struct regpair tce_phy_addr;
struct ustorm_iscsi_placement_db place_db; struct ustorm_iscsi_placement_db place_db;
u32 data_rcv_seq; u32 reserved8;
u32 rem_rcv_len; u32 rem_rcv_len;
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
u16 hdr_itt; u16 hdr_itt;
@ -713,8 +811,10 @@ struct ustorm_iscsi_st_context {
#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 #define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) #define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 #define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2) #define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2 #define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
u8 task_pdu_cache_index; u8 task_pdu_cache_index;
u8 task_pbe_cache_index; u8 task_pbe_cache_index;
#elif defined(__LITTLE_ENDIAN) #elif defined(__LITTLE_ENDIAN)
@ -725,8 +825,10 @@ struct ustorm_iscsi_st_context {
#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 #define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) #define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 #define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2) #define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2 #define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
u8 hdr_second_byte_union; u8 hdr_second_byte_union;
#endif #endif
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
@ -777,14 +879,14 @@ struct ustorm_iscsi_st_context {
*/ */
struct tstorm_tcp_st_context_section { struct tstorm_tcp_st_context_section {
u32 flags1; u32 flags1;
#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B (0xFFFFFF<<0) #define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0)
#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B_SHIFT 0 #define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0
#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24) #define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24 #define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25) #define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25 #define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS (0x1<<26) #define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26)
#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS_SHIFT 26 #define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26
#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27) #define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27 #define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28) #define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
@ -793,11 +895,11 @@ struct tstorm_tcp_st_context_section {
#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29 #define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30) #define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30 #define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3 (0x1<<31) #define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31)
#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3_SHIFT 31 #define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31
u32 flags2; u32 flags2;
#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B (0xFFFFFF<<0) #define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0)
#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B_SHIFT 0 #define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0
#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24) #define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24 #define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25) #define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
@ -810,18 +912,18 @@ struct tstorm_tcp_st_context_section {
#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28 #define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29) #define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29 #define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED (0x1<<30) #define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30)
#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED_SHIFT 30 #define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30
#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO (0x1<<31) #define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31)
#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO_SHIFT 31 #define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
u16 reserved_slowpath; u16 mss;
u8 tcp_sm_state_3b; u8 tcp_sm_state;
u8 rto_exp_3b; u8 rto_exp;
#elif defined(__LITTLE_ENDIAN) #elif defined(__LITTLE_ENDIAN)
u8 rto_exp_3b; u8 rto_exp;
u8 tcp_sm_state_3b; u8 tcp_sm_state;
u16 reserved_slowpath; u16 mss;
#endif #endif
u32 rcv_nxt; u32 rcv_nxt;
u32 timestamp_recent; u32 timestamp_recent;
@ -846,11 +948,11 @@ struct tstorm_tcp_st_context_section {
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
u8 statistics_counter_id; u8 statistics_counter_id;
u8 ooo_support_mode; u8 ooo_support_mode;
u8 snd_wnd_scale_4b; u8 snd_wnd_scale;
u8 dup_ack_count; u8 dup_ack_count;
#elif defined(__LITTLE_ENDIAN) #elif defined(__LITTLE_ENDIAN)
u8 dup_ack_count; u8 dup_ack_count;
u8 snd_wnd_scale_4b; u8 snd_wnd_scale;
u8 ooo_support_mode; u8 ooo_support_mode;
u8 statistics_counter_id; u8 statistics_counter_id;
#endif #endif
@ -860,13 +962,21 @@ struct tstorm_tcp_st_context_section {
u32 isle_start_seq; u32 isle_start_seq;
u32 isle_end_seq; u32 isle_end_seq;
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
u16 mss; u16 second_isle_address;
u16 recent_seg_wnd; u16 recent_seg_wnd;
#elif defined(__LITTLE_ENDIAN) #elif defined(__LITTLE_ENDIAN)
u16 recent_seg_wnd; u16 recent_seg_wnd;
u16 mss; u16 second_isle_address;
#endif
#if defined(__BIG_ENDIAN)
u8 max_isles_ever_happened;
u8 isles_number;
u16 last_isle_address;
#elif defined(__LITTLE_ENDIAN)
u16 last_isle_address;
u8 isles_number;
u8 max_isles_ever_happened;
#endif #endif
u32 reserved4;
u32 max_rt_time; u32 max_rt_time;
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
u16 lsb_mac_address; u16 lsb_mac_address;
@ -876,7 +986,7 @@ struct tstorm_tcp_st_context_section {
u16 lsb_mac_address; u16 lsb_mac_address;
#endif #endif
u32 msb_mac_address; u32 msb_mac_address;
u32 reserved2; u32 rightmost_received_seq;
}; };
/* /*
@ -951,7 +1061,7 @@ struct tstorm_iscsi_st_context_section {
u8 scratchpad_idx; u8 scratchpad_idx;
struct iscsi_term_vars term_vars; struct iscsi_term_vars term_vars;
#endif #endif
u32 reserved2; u32 process_nxt;
}; };
/* /*
@ -1174,24 +1284,12 @@ struct xstorm_iscsi_ag_context {
#endif #endif
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
u8 cdu_reserved; u8 cdu_reserved;
u8 agg_vars4; u8 __agg_vars4;
#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
u8 agg_vars3; u8 agg_vars3;
#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6) #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6 #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
u8 agg_vars2; u8 agg_vars2;
#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 #define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
@ -1222,21 +1320,9 @@ struct xstorm_iscsi_ag_context {
u8 agg_vars3; u8 agg_vars3;
#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 #define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6) #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6 #define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
u8 agg_vars4; u8 __agg_vars4;
#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
u8 cdu_reserved; u8 cdu_reserved;
#endif #endif
u32 more_to_send; u32 more_to_send;
@ -1270,8 +1356,8 @@ struct xstorm_iscsi_ag_context {
#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4) #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4 #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
@ -1286,8 +1372,8 @@ struct xstorm_iscsi_ag_context {
#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15) #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15 #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
u8 agg_val3_th; u8 agg_val3_th;
u8 agg_vars6; u8 agg_vars6;
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
@ -1310,8 +1396,8 @@ struct xstorm_iscsi_ag_context {
#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 #define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 #define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4) #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4 #define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 #define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) #define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
@ -1326,14 +1412,14 @@ struct xstorm_iscsi_ag_context {
#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 #define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 #define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15) #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15 #define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
#endif #endif
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
u16 __agg_val11_th; u16 __agg_val11_th;
u16 __agg_val11; u16 __gen_data;
#elif defined(__LITTLE_ENDIAN) #elif defined(__LITTLE_ENDIAN)
u16 __agg_val11; u16 __gen_data;
u16 __agg_val11_th; u16 __agg_val11_th;
#endif #endif
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
@ -1384,7 +1470,7 @@ struct xstorm_iscsi_ag_context {
#endif #endif
u32 hq_cons_tcp_seq; u32 hq_cons_tcp_seq;
u32 exp_stat_sn; u32 exp_stat_sn;
u32 agg_misc5; u32 rst_seq_num;
}; };
/* /*
@ -1478,12 +1564,12 @@ struct tstorm_iscsi_ag_context {
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4) #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4 #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7) #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7 #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
u8 state; u8 state;
#elif defined(__LITTLE_ENDIAN) #elif defined(__LITTLE_ENDIAN)
u8 state; u8 state;
@ -1496,63 +1582,63 @@ struct tstorm_iscsi_ag_context {
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 #define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4) #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4 #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 #define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7) #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7 #define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
u16 ulp_credit; u16 ulp_credit;
#endif #endif
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
u16 __agg_val4; u16 __agg_val4;
u16 agg_vars2; u16 agg_vars2;
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0) #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0 #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1) #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1 #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2) #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2 #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4) #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4 #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11) #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11 #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12) #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13) #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
#elif defined(__LITTLE_ENDIAN) #elif defined(__LITTLE_ENDIAN)
u16 agg_vars2; u16 agg_vars2;
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0) #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0 #define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1) #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1 #define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2) #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2 #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4) #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4 #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 #define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 #define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11) #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11 #define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12) #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 #define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13) #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 #define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 #define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) #define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
@ -1562,100 +1648,6 @@ struct tstorm_iscsi_ag_context {
struct tstorm_tcp_tcp_ag_context_section tcp; struct tstorm_tcp_tcp_ag_context_section tcp;
}; };
/*
* The iscsi aggregative context of Cstorm
*/
struct cstorm_iscsi_ag_context {
u32 agg_vars1;
#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
#if defined(__BIG_ENDIAN)
u8 __aux1_th;
u8 __aux1_val;
u16 __agg_vars2;
#elif defined(__LITTLE_ENDIAN)
u16 __agg_vars2;
u8 __aux1_val;
u8 __aux1_th;
#endif
u32 rel_seq;
u32 rel_seq_th;
#if defined(__BIG_ENDIAN)
u16 hq_cons;
u16 hq_prod;
#elif defined(__LITTLE_ENDIAN)
u16 hq_prod;
u16 hq_cons;
#endif
#if defined(__BIG_ENDIAN)
u8 __reserved62;
u8 __reserved61;
u8 __reserved60;
u8 __reserved59;
#elif defined(__LITTLE_ENDIAN)
u8 __reserved59;
u8 __reserved60;
u8 __reserved61;
u8 __reserved62;
#endif
#if defined(__BIG_ENDIAN)
u16 __reserved64;
u16 __cq_u_prod0;
#elif defined(__LITTLE_ENDIAN)
u16 __cq_u_prod0;
u16 __reserved64;
#endif
u32 __cq_u_prod1;
#if defined(__BIG_ENDIAN)
u16 __agg_vars3;
u16 __cq_u_prod2;
#elif defined(__LITTLE_ENDIAN)
u16 __cq_u_prod2;
u16 __agg_vars3;
#endif
#if defined(__BIG_ENDIAN)
u16 __aux2_th;
u16 __cq_u_prod3;
#elif defined(__LITTLE_ENDIAN)
u16 __cq_u_prod3;
u16 __aux2_th;
#endif
};
/* /*
* The iscsi aggregative context of Ustorm * The iscsi aggregative context of Ustorm
*/ */
@ -1746,8 +1738,8 @@ struct ustorm_iscsi_ag_context {
#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
u8 decision_rule_enable_bits; u8 decision_rule_enable_bits;
@ -1790,30 +1782,14 @@ struct ustorm_iscsi_ag_context {
#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 #define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 #define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 #define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
u16 __reserved2; u16 __reserved2;
#endif #endif
}; };
/*
* Timers connection context
*/
struct iscsi_timers_block_context {
u32 __reserved_0;
u32 __reserved_1;
u32 __reserved_2;
u32 flags;
#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
};
/* /*
* Ethernet context section, shared in TOE, RDMA and ISCSI * Ethernet context section, shared in TOE, RDMA and ISCSI
*/ */
@ -1963,7 +1939,7 @@ struct xstorm_tcp_context_section {
#endif #endif
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
u8 original_nagle_1b; u8 original_nagle_1b;
u8 ts_enabled_1b; u8 ts_enabled;
u16 tcp_params; u16 tcp_params;
#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0) #define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0 #define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
@ -1973,8 +1949,8 @@ struct xstorm_tcp_context_section {
#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 #define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) #define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 #define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11) #define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11 #define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) #define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 #define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) #define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
@ -1991,15 +1967,15 @@ struct xstorm_tcp_context_section {
#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 #define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) #define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 #define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11) #define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11 #define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) #define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 #define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) #define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13 #define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14) #define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14 #define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
u8 ts_enabled_1b; u8 ts_enabled;
u8 original_nagle_1b; u8 original_nagle_1b;
#endif #endif
#if defined(__BIG_ENDIAN) #if defined(__BIG_ENDIAN)
@ -2030,8 +2006,8 @@ struct xstorm_common_context_section {
#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 #define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2) #define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2 #define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7) #define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7 #define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
u8 ip_version_1b; u8 ip_version_1b;
#elif defined(__LITTLE_ENDIAN) #elif defined(__LITTLE_ENDIAN)
u8 ip_version_1b; u8 ip_version_1b;
@ -2042,8 +2018,8 @@ struct xstorm_common_context_section {
#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 #define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2) #define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2 #define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7) #define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7 #define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
u16 reserved; u16 reserved;
#endif #endif
}; };
@ -2284,7 +2260,7 @@ struct iscsi_context {
struct tstorm_iscsi_ag_context tstorm_ag_context; struct tstorm_iscsi_ag_context tstorm_ag_context;
struct cstorm_iscsi_ag_context cstorm_ag_context; struct cstorm_iscsi_ag_context cstorm_ag_context;
struct ustorm_iscsi_ag_context ustorm_ag_context; struct ustorm_iscsi_ag_context ustorm_ag_context;
struct iscsi_timers_block_context timers_context; struct timers_block_context timers_context;
struct regpair upb_context; struct regpair upb_context;
struct xstorm_iscsi_st_context xstorm_st_context; struct xstorm_iscsi_st_context xstorm_st_context;
struct regpair xpb_context; struct regpair xpb_context;
@ -2434,16 +2410,16 @@ struct l5cm_packet_size {
* l5cm connection parameters * l5cm connection parameters
*/ */
union l5cm_reduce_param_union { union l5cm_reduce_param_union {
u32 passive_side_scramble_key; u32 opaque1;
u32 pcs_id; u32 opaque2;
}; };
/* /*
* l5cm connection parameters * l5cm connection parameters
*/ */
struct l5cm_reduce_conn { struct l5cm_reduce_conn {
union l5cm_reduce_param_union param; union l5cm_reduce_param_union opaque1;
u32 isn; u32 opaque2;
}; };
/* /*

Просмотреть файл

@ -138,6 +138,7 @@ struct cnic_irq {
unsigned int vector; unsigned int vector;
void *status_blk; void *status_blk;
u32 status_blk_num; u32 status_blk_num;
u32 status_blk_num2;
u32 irq_flags; u32 irq_flags;
#define CNIC_IRQ_FL_MSIX 0x00000001 #define CNIC_IRQ_FL_MSIX 0x00000001
}; };
@ -152,6 +153,7 @@ struct cnic_eth_dev {
struct pci_dev *pdev; struct pci_dev *pdev;
void __iomem *io_base; void __iomem *io_base;
void __iomem *io_base2; void __iomem *io_base2;
void *iro_arr;
u32 ctx_tbl_offset; u32 ctx_tbl_offset;
u32 ctx_tbl_len; u32 ctx_tbl_len;

Просмотреть файл

@ -58,6 +58,8 @@
#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 #define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 #define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
#define BNX2I_5771X_DBELL_PAGE_SIZE 128
/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */ /* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
#define MAX_BD_LENGTH 65535 #define MAX_BD_LENGTH 65535
#define BD_SPLIT_SIZE 32768 #define BD_SPLIT_SIZE 32768

Просмотреть файл

@ -2405,7 +2405,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
reg_base = pci_resource_start(ep->hba->pcidev, reg_base = pci_resource_start(ep->hba->pcidev,
BNX2X_DOORBELL_PCI_BAR); BNX2X_DOORBELL_PCI_BAR);
reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE; reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) +
DPM_TRIGER_TYPE;
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
goto arm_cq; goto arm_cq;
} }

Просмотреть файл

@ -32,8 +32,8 @@ fw-shipped-$(CONFIG_ADAPTEC_STARFIRE) += adaptec/starfire_rx.bin \
adaptec/starfire_tx.bin adaptec/starfire_tx.bin
fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin
fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw
fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-5.2.13.0.fw \ fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.0.34.0.fw \
bnx2x/bnx2x-e1h-5.2.13.0.fw bnx2x/bnx2x-e1h-6.0.34.0.fw
fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-5.0.0.j15.fw \ fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-5.0.0.j15.fw \
bnx2/bnx2-rv2p-09-5.0.0.j10.fw \ bnx2/bnx2-rv2p-09-5.0.0.j10.fw \
bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw \ bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw \