bnx2x: Remove the sriov VFOP mechanism
Since we now possess a workqueue dedicated to sriov, the paradigm that sriov-related tasks cannot sleep is no longer correct. The VFOP mechanism was what previously supported that paradigm - sriov-related tasks were broken into segments which did not require sleep, and the mechanism re-scheduled the next segment whenever possible. This patch removes the VFOP mechanism altogether; the resulting code is much easier to follow - the segments are gathered into straightforward functions which sleep whenever necessary.

Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Ariel Elior <Ariel.Elior@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
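To illustrate the paradigm shift the message describes, here is a minimal, self-contained C sketch (editor's illustration only, not driver code; all names are invented): the removed VFOP style splits one logical operation into non-sleeping segments that an external scheduler must keep re-invoking, while the new style runs the whole operation as one plain function from a context that is allowed to sleep.

/* Illustrative sketch only - not part of the patch. */
#include <stdio.h>

/* Old style: non-sleeping segments driven by an external scheduler. */
enum op_state { OP_STEP_A, OP_STEP_B, OP_DONE };
struct op { enum op_state state; };

static int op_transition(struct op *op)
{
    switch (op->state) {
    case OP_STEP_A:
        printf("segment A (may not sleep)\n");
        op->state = OP_STEP_B;
        return 1;               /* ask to be rescheduled */
    case OP_STEP_B:
        printf("segment B (may not sleep)\n");
        op->state = OP_DONE;
        return 0;
    default:
        return 0;
    }
}

/* New style: one straightforward function, run from a dedicated
 * workqueue context, free to wait/sleep between the steps. */
static int do_whole_op(void)
{
    printf("step A\n");
    /* a real driver could wait here for the HW to complete */
    printf("step B\n");
    return 0;                   /* plain error code to the caller */
}

int main(void)
{
    struct op op = { .state = OP_STEP_A };

    while (op_transition(&op))  /* old: external rescheduling loop */
        ;
    return do_whole_op();       /* new: a single sleeping call */
}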
This commit is contained in:
Parent: 370d4a2659
Commit: 2dc33bbc4f
@@ -1414,7 +1414,6 @@ enum sp_rtnl_flag {

enum bnx2x_iov_flag {
    BNX2X_IOV_HANDLE_VF_MSG,
    BNX2X_IOV_CONT_VFOP,
    BNX2X_IOV_HANDLE_FLR,
};
@@ -1857,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
#else
    return;
#endif
    /* SRIOV: reschedule any 'in_progress' operations */
    bnx2x_iov_sp_event(bp, cid);

    smp_mb__before_atomic_inc();
    atomic_inc(&bp->cq_spq_left);
File diff suppressed because it is too large
@@ -88,113 +88,32 @@ struct bnx2x_vf_queue {
    bool sp_initialized;
};

/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
 * q-init, q-setup and SB index
/* struct bnx2x_vf_queue_construct_params - prepare queue construction
 * parameters: q-init, q-setup and SB index
 */
struct bnx2x_vfop_qctor_params {
struct bnx2x_vf_queue_construct_params {
    struct bnx2x_queue_state_params qstate;
    struct bnx2x_queue_setup_params prep_qsetup;
};

/* VFOP parameters (one copy per VF) */
union bnx2x_vfop_params {
    struct bnx2x_vlan_mac_ramrod_params vlan_mac;
    struct bnx2x_rx_mode_ramrod_params rx_mode;
    struct bnx2x_mcast_ramrod_params mcast;
    struct bnx2x_config_rss_params rss;
    struct bnx2x_vfop_qctor_params qctor;
    struct bnx2x_queue_state_params qstate;
};

/* forward */
struct bnx2x_virtf;

/* VFOP definitions */
typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);

struct bnx2x_vfop_cmd {
    vfop_handler_t done;
    bool block;
};

/* VFOP queue filters command additional arguments */
struct bnx2x_vfop_filter {
    struct list_head link;
struct bnx2x_vf_mac_vlan_filter {
    int type;
#define BNX2X_VFOP_FILTER_MAC	1
#define BNX2X_VFOP_FILTER_VLAN	2
#define BNX2X_VF_FILTER_MAC	1
#define BNX2X_VF_FILTER_VLAN	2

    bool add;
    u8 *mac;
    u16 vid;
};

struct bnx2x_vfop_filters {
    int add_cnt;
    struct list_head head;
    struct bnx2x_vfop_filter filters[];
};

/* transient list allocated, built and saved until its
 * passed to the SP-VERBs layer.
 */
struct bnx2x_vfop_args_mcast {
    int mc_num;
    struct bnx2x_mcast_list_elem *mc;
};

struct bnx2x_vfop_args_qctor {
    int qid;
    u16 sb_idx;
};

struct bnx2x_vfop_args_qdtor {
    int qid;
    struct eth_context *cxt;
};

struct bnx2x_vfop_args_defvlan {
    int qid;
    bool enable;
    u16 vid;
    u8 prio;
};

struct bnx2x_vfop_args_qx {
    int qid;
    bool en_add;
};

struct bnx2x_vfop_args_filters {
    struct bnx2x_vfop_filters *multi_filter;
    atomic_t *credit;	/* non NULL means 'don't consume credit' */
};

struct bnx2x_vfop_args_tpa {
    int qid;
    dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
};

union bnx2x_vfop_args {
    struct bnx2x_vfop_args_mcast mc_list;
    struct bnx2x_vfop_args_qctor qctor;
    struct bnx2x_vfop_args_qdtor qdtor;
    struct bnx2x_vfop_args_defvlan defvlan;
    struct bnx2x_vfop_args_qx qx;
    struct bnx2x_vfop_args_filters filters;
    struct bnx2x_vfop_args_tpa tpa;
};

struct bnx2x_vfop {
    struct list_head link;
    int rc;				/* return code */
    int state;				/* next state */
    union bnx2x_vfop_args args;		/* extra arguments */
    union bnx2x_vfop_params *op_p;	/* ramrod params */

    /* state machine callbacks */
    vfop_handler_t transition;
    vfop_handler_t done;
struct bnx2x_vf_mac_vlan_filters {
    int count;
    struct bnx2x_vf_mac_vlan_filter filters[];
};

/* vf context */
@@ -214,15 +133,7 @@ struct bnx2x_virtf {
#define VF_ENABLED	2	/* VF Enabled */
#define VF_RESET	3	/* VF FLR'd, pending cleanup */

    /* non 0 during flr cleanup */
    u8 flr_clnup_stage;
#define VF_FLR_CLN	1	/* reclaim resources and do 'final cleanup'
				 * sans the end-wait
				 */
#define VF_FLR_ACK	2	/* ACK flr notification */
#define VF_FLR_EPILOG	3	/* wait for VF remnants to dissipate in the HW
				 * ~ final cleanup' end wait
				 */
    bool flr_clnup_stage;	/* true during flr cleanup */

    /* dma */
    dma_addr_t fw_stat_map;	/* valid iff VF_CFG_STATS */
@@ -286,11 +197,6 @@ struct bnx2x_virtf {
    struct bnx2x_rss_config_obj rss_conf_obj;

    /* slow-path operations */
    atomic_t op_in_progress;
    int op_rc;
    bool op_wait_blocking;
    struct list_head op_list_head;
    union bnx2x_vfop_params op_params;
    struct mutex op_mutex;	/* one vfop at a time mutex */
    enum channel_tlvs op_current;
};
@@ -477,7 +383,6 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);
void bnx2x_iov_init_dmae(struct bnx2x *bp);
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj);
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid);
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
@@ -497,163 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
		  dma_addr_t *sb_map);

/* VFOP generic helpers */
#define bnx2x_vfop_default(state) do { \
		BNX2X_ERR("Bad state %d\n", (state)); \
		vfop->rc = -EINVAL; \
		goto op_err; \
	} while (0)

enum {
    VFOP_DONE,
    VFOP_CONT,
    VFOP_VERIFY_PEND,
};

#define bnx2x_vfop_finalize(vf, rc, next) do { \
		if ((rc) < 0) \
			goto op_err; \
		else if ((rc) > 0) \
			goto op_pending; \
		else if ((next) == VFOP_DONE) \
			goto op_done; \
		else if ((next) == VFOP_VERIFY_PEND) \
			BNX2X_ERR("expected pending\n"); \
		else { \
			DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \
			atomic_set(&vf->op_in_progress, 1); \
			bnx2x_schedule_iov_task(bp, \
						BNX2X_IOV_CONT_VFOP); \
			return; \
		} \
	} while (0)

#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
	do { \
		vfop->state = first_state; \
		vfop->op_p = &vf->op_params; \
		vfop->transition = trans_hndlr; \
		vfop->done = done_hndlr; \
	} while (0)

static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
						struct bnx2x_virtf *vf)
{
    WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
    WARN_ON(list_empty(&vf->op_list_head));
    return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
}

static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
						struct bnx2x_virtf *vf)
{
    struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);

    WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
    if (vfop) {
        INIT_LIST_HEAD(&vfop->link);
        list_add(&vfop->link, &vf->op_list_head);
    }
    return vfop;
}

static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vfop *vfop)
{
    /* rc < 0 - error, otherwise set to 0 */
    DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
    if (vfop->rc >= 0)
        vfop->rc = 0;
    DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);

    /* unlink the current op context and propagate error code
     * must be done before invoking the 'done()' handler
     */
    WARN(!mutex_is_locked(&vf->op_mutex),
         "about to access vf op linked list but mutex was not locked!");
    list_del(&vfop->link);

    if (list_empty(&vf->op_list_head)) {
        DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
        vf->op_rc = vfop->rc;
        DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
           vf->op_rc, vfop->rc);
    } else {
        struct bnx2x_vfop *cur_vfop;

        DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
        cur_vfop = bnx2x_vfop_cur(bp, vf);
        cur_vfop->rc = vfop->rc;
        DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
           vf->op_rc, vfop->rc);
    }

    /* invoke done handler */
    if (vfop->done) {
        DP(BNX2X_MSG_IOV, "calling done handler\n");
        vfop->done(bp, vf);
    } else {
        /* there is no done handler for the operation to unlock
         * the mutex. Must have gotten here from PF initiated VF RELEASE
         */
        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
    }

    DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
       vf->op_rc, vfop->rc);

    /* if this is the last nested op reset the wait_blocking flag
     * to release any blocking wrappers, only after 'done()' is invoked
     */
    if (list_empty(&vf->op_list_head)) {
        DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
        vf->op_wait_blocking = false;
    }

    kfree(vfop);
}

static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
					   struct bnx2x_virtf *vf)
{
    /* can take a while if any port is running */
    int cnt = 5000;

    might_sleep();
    while (cnt--) {
        if (vf->op_wait_blocking == false) {
#ifdef BNX2X_STOP_ON_ERROR
            DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
#endif
            return 0;
        }
        usleep_range(1000, 2000);

        if (bp->panic)
            return -EIO;
    }

    /* timeout! */
#ifdef BNX2X_STOP_ON_ERROR
    bnx2x_panic();
#endif

    return -EBUSY;
}

static inline int bnx2x_vfop_transition(struct bnx2x *bp,
					struct bnx2x_virtf *vf,
					vfop_handler_t transition,
					bool block)
{
    if (block)
        vf->op_wait_blocking = true;
    transition(bp, vf);
    if (block)
        return bnx2x_vfop_wait_blocking(bp, vf);
    return 0;
}

/* VFOP queue construction helpers */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
@@ -668,64 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   struct bnx2x_vf_queue_construct_params *p,
			   unsigned long q_type);
int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only);

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only);
int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mac_vlan_filters *filters,
				  int qid, bool drv_only);

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid);
int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
			 struct bnx2x_vf_queue_construct_params *qctor);

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid);
int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only);
int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags);
int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
		    int qid, unsigned long accept_flags);

int bnx2x_vfop_close_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd);
int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);

int bnx2x_vfop_release_cmd(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vfop_cmd *cmd);
int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);

int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
		       struct bnx2x_virtf *vf,
		       struct bnx2x_vfop_cmd *cmd);
int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
			struct bnx2x_config_rss_params *rss);

int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
		       struct bnx2x_virtf *vf,
		       struct bnx2x_vfop_cmd *cmd,
		       struct vfpf_tpa_tlv *tpa_tlv);
int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
			struct vfpf_tpa_tlv *tlv,
			struct bnx2x_queue_update_tpa_params *params);

/* VF release ~ VF close + VF release-resources
 *
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
@@ -796,7 +521,6 @@ void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);

static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj) {}
static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid) {}
static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
					union event_ring_elem *elem) {return 1; }
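The prototype changes above replace the callback-plus-completion pattern (a bnx2x_vfop_cmd carrying a 'done' handler and a 'block' flag) with plain int-returning functions. A rough standalone model of the two calling conventions follows (editor's illustration, invented names, not the driver's API).

/* Illustrative sketch only - not part of the patch. */
#include <stdio.h>

struct cmd {
    void (*done)(int rc);   /* old: completion callback */
    int   block;            /* old: optionally wait for completion */
};

static void on_done(int rc)
{
    printf("old style: completion callback, rc=%d\n", rc);
}

/* Old convention: start the operation, report the result later. */
static int start_op(struct cmd *cmd)
{
    int rc = 0;             /* pretend the work was queued and finished */

    cmd->done(rc);
    return rc;
}

/* New convention: do the work now and hand back the result. */
static int do_op(void)
{
    return 0;
}

int main(void)
{
    struct cmd cmd = { .done = on_done, .block = 0 };
    int rc;

    start_op(&cmd);         /* old: result arrives via callback */
    rc = do_op();           /* new: result is the return value */
    printf("new style: rc=%d\n", rc);
    return rc;
}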
@@ -673,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)

out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return rc;
}
@@ -1048,7 +1049,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
}

static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
				       struct bnx2x_virtf *vf)
				       struct bnx2x_virtf *vf,
				       int vf_rc)
{
    struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
    struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
@@ -1060,7 +1062,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
    DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
       mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

    resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
    resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);

    /* send response */
    vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
@@ -1108,14 +1110,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
    return;

mbx_error:
    bnx2x_vf_release(bp, vf, false); /* non blocking */
    bnx2x_vf_release(bp, vf);
}

static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
			      struct bnx2x_virtf *vf)
			      struct bnx2x_virtf *vf,
			      int rc)
{
    bnx2x_vf_mbx_resp_single_tlv(bp, vf);
    bnx2x_vf_mbx_resp_send_msg(bp, vf);
    bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}

static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
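The hunks above change the mailbox response path to take the status code as an explicit argument instead of reading it from the shared vf->op_rc field. A small standalone sketch of why passing the status explicitly is easier to reason about (invented names, editor's illustration only):

/* Illustrative sketch only - not part of the patch. */
#include <stdio.h>

static int shared_rc;              /* old: status parked in shared state */

static void respond_old(void)
{
    printf("status %d (read from shared state)\n", shared_rc);
}

static void respond_new(int rc)
{
    printf("status %d (passed by the caller)\n", rc);
}

int main(void)
{
    int rc = -22;                  /* e.g. an -EINVAL-style failure */

    shared_rc = rc;                /* old: two steps, easy to miss one */
    respond_old();

    respond_new(rc);               /* new: one self-contained call */
    return 0;
}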
@@ -1239,8 +1242,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
		      sizeof(struct channel_list_end_tlv));

    /* send the response */
    vf->op_rc = vfop_status;
    bnx2x_vf_mbx_resp_send_msg(bp, vf);
    bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}

static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1272,19 +1274,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
			 struct bnx2x_vf_mbx *mbx)
{
    struct vfpf_init_tlv *init = &mbx->msg->req.init;
    int rc;

    /* record ghost addresses from vf message */
    vf->spq_map = init->spq_addr;
    vf->fw_stat_map = init->stats_addr;
    vf->stats_stride = init->stats_stride;
    vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
    rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

    /* set VF multiqueue statistics collection mode */
    if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
        vf->cfg_flags |= VF_CFG_STATS_COALESCE;

    /* response */
    bnx2x_vf_mbx_resp(bp, vf);
    bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* convert MBX queue-flags to standard SP queue-flags */
@@ -1319,16 +1322,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
			 struct bnx2x_vf_mbx *mbx)
{
    struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
    struct bnx2x_vfop_cmd cmd = {
        .done = bnx2x_vf_mbx_resp,
        .block = false,
    };
    struct bnx2x_vf_queue_construct_params qctor;
    int rc = 0;

    /* verify vf_qid */
    if (setup_q->vf_qid >= vf_rxq_count(vf)) {
        BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
                  setup_q->vf_qid, vf_rxq_count(vf));
        vf->op_rc = -EINVAL;
        rc = -EINVAL;
        goto response;
    }
@@ -1346,9 +1347,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
            bnx2x_leading_vfq_init(bp, vf, q);

        /* re-init the VF operation context */
        memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
        setup_p = &vf->op_params.qctor.prep_qsetup;
        init_p = &vf->op_params.qctor.qstate.params.init;
        memset(&qctor, 0 ,
               sizeof(struct bnx2x_vf_queue_construct_params));
        setup_p = &qctor.prep_qsetup;
        init_p = &qctor.qstate.params.init;

        /* activate immediately */
        __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
@@ -1434,44 +1436,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                   q->index, q->sb_idx);
        }
        /* complete the preparations */
        bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
        bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);

        vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
        if (vf->op_rc)
        rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
        if (rc)
            goto response;
        return;
    }
response:
    bnx2x_vf_mbx_resp(bp, vf);
    bnx2x_vf_mbx_resp(bp, vf, rc);
}

enum bnx2x_vfop_filters_state {
    BNX2X_VFOP_MBX_Q_FILTERS_MACS,
    BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
    BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
    BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
    BNX2X_VFOP_MBX_Q_FILTERS_DONE
};

static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct vfpf_set_q_filters_tlv *tlv,
				     struct bnx2x_vfop_filters **pfl,
				     struct bnx2x_vf_mac_vlan_filters **pfl,
				     u32 type_flag)
{
    int i, j;
    struct bnx2x_vfop_filters *fl = NULL;
    struct bnx2x_vf_mac_vlan_filters *fl = NULL;
    size_t fsz;

    fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
          sizeof(struct bnx2x_vfop_filters);
    fsz = tlv->n_mac_vlan_filters *
          sizeof(struct bnx2x_vf_mac_vlan_filter) +
          sizeof(struct bnx2x_vf_mac_vlan_filters);

    fl = kzalloc(fsz, GFP_KERNEL);
    if (!fl)
        return -ENOMEM;

    INIT_LIST_HEAD(&fl->head);

    for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
        struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
@@ -1479,17 +1471,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
            continue;
        if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
            fl->filters[j].mac = msg_filter->mac;
            fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
            fl->filters[j].type = BNX2X_VF_FILTER_MAC;
        } else {
            fl->filters[j].vid = msg_filter->vlan_tag;
            fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
            fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
        }
        fl->filters[j].add =
            (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
            true : false;
        list_add_tail(&fl->filters[j++].link, &fl->head);
        fl->count++;
    }
    if (list_empty(&fl->head))
    if (!fl->count)
        kfree(fl);
    else
        *pfl = fl;
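Per the hunk above, the filter container loses its list_head/add_cnt bookkeeping and becomes a plain counted array with a flexible array member. A standalone sketch of that layout and of how such a buffer is sized and filled (illustrative types only, not the driver's):

/* Illustrative sketch only - not part of the patch. */
#include <stdio.h>
#include <stdlib.h>

struct filter {
    int type;
    int add;
    unsigned short vid;
};

struct filter_list {
    int count;
    struct filter filters[];   /* flexible array member */
};

int main(void)
{
    int n = 3, i;
    /* one allocation sized for the header plus n entries */
    struct filter_list *fl =
        calloc(1, sizeof(*fl) + n * sizeof(struct filter));

    if (!fl)
        return 1;

    for (i = 0; i < n; i++) {
        fl->filters[fl->count].type = 1;
        fl->filters[fl->count].add = 1;
        fl->filters[fl->count].vid = 100 + i;
        fl->count++;               /* count replaces list bookkeeping */
    }

    printf("built %d filters\n", fl->count);
    free(fl);
    return 0;
}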
@@ -1529,168 +1521,97 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
#define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID

static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
    int rc;
    int rc = 0;

    struct vfpf_set_q_filters_tlv *msg =
        &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

    struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
    enum bnx2x_vfop_filters_state state = vfop->state;
    /* check for any mac/vlan changes */
    if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
        /* build mac list */
        struct bnx2x_vf_mac_vlan_filters *fl = NULL;

    struct bnx2x_vfop_cmd cmd = {
        .done = bnx2x_vfop_mbx_qfilters,
        .block = false,
    };
        rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
                                       VFPF_MAC_FILTER);
        if (rc)
            goto op_err;

    DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
        if (fl) {

    if (vfop->rc < 0)
        goto op_err;

    switch (state) {
    case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
        /* next state */
        vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;

        /* check for any vlan/mac changes */
        if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
            /* build mac list */
            struct bnx2x_vfop_filters *fl = NULL;

            vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
                                                 VFPF_MAC_FILTER);
            if (vfop->rc)
            /* set mac list */
            rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
                                               msg->vf_qid,
                                               false);
            if (rc)
                goto op_err;

            if (fl) {
                /* set mac list */
                rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
                                             msg->vf_qid,
                                             false);
                if (rc) {
                    vfop->rc = rc;
                    goto op_err;
                }
                return;
            }
        }
        /* fall through */

    case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
        /* next state */
        vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
        /* build vlan list */
        fl = NULL;

        /* check for any vlan/mac changes */
        if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
            /* build vlan list */
            struct bnx2x_vfop_filters *fl = NULL;
        rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
                                       VFPF_VLAN_FILTER);
        if (rc)
            goto op_err;

            vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
                                                 VFPF_VLAN_FILTER);
            if (vfop->rc)
        if (fl) {
            /* set vlan list */
            rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
                                               msg->vf_qid,
                                               false);
            if (rc)
                goto op_err;

            if (fl) {
                /* set vlan list */
                rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
                                              msg->vf_qid,
                                              false);
                if (rc) {
                    vfop->rc = rc;
                    goto op_err;
                }
                return;
            }
        }
        /* fall through */
        }
    }

    case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
        /* next state */
        vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
    if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
        unsigned long accept = 0;
        struct pf_vf_bulletin_content *bulletin =
            BP_VF_BULLETIN(bp, vf->index);

        if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
            unsigned long accept = 0;
            struct pf_vf_bulletin_content *bulletin =
                BP_VF_BULLETIN(bp, vf->index);
        /* covert VF-PF if mask to bnx2x accept flags */
        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
            __set_bit(BNX2X_ACCEPT_UNICAST, &accept);

            /* covert VF-PF if mask to bnx2x accept flags */
            if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
                __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
            __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);

            if (msg->rx_mask &
                VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
                __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
            __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);

            if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
                __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
            __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);

            if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
                __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
        if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
            __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);

            if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
                __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
        /* A packet arriving the vf's mac should be accepted
         * with any vlan, unless a vlan has already been
         * configured.
         */
        if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
            __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

            /* A packet arriving the vf's mac should be accepted
             * with any vlan, unless a vlan has already been
             * configured.
             */
            if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
                __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
        /* set rx-mode */
        rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
        if (rc)
            goto op_err;
    }

            /* set rx-mode */
            rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
                                       msg->vf_qid, accept);
            if (rc) {
                vfop->rc = rc;
                goto op_err;
            }
            return;
        }
        /* fall through */

    case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
        /* next state */
        vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;

        if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
            /* set mcasts */
            rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
                                      msg->n_multicast, false);
            if (rc) {
                vfop->rc = rc;
                goto op_err;
            }
            return;
        }
        /* fall through */
    op_done:
    case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
        bnx2x_vfop_end(bp, vf, vfop);
        return;
    if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
        /* set mcasts */
        rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
                            msg->n_multicast, false);
        if (rc)
            goto op_err;
    }
op_err:
    BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
              vf->abs_vfid, msg->vf_qid, vfop->rc);
    goto op_done;

    default:
        bnx2x_vfop_default(state);
    }
}

static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vfop_cmd *cmd)
{
    struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
    if (vfop) {
        bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
                         bnx2x_vfop_mbx_qfilters, cmd->done);
        return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
                                     cmd->block);
    }
    return -ENOMEM;
    if (rc)
        BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
                  vf->abs_vfid, msg->vf_qid, rc);
    return rc;
}

static int bnx2x_filters_validate_mac(struct bnx2x *bp,
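The large hunk above collapses the BNX2X_VFOP_MBX_Q_FILTERS_* state machine into one function that handles MACs, VLANs, rx-mode and multicast in order and returns an error code. A compact standalone model of that linear flow follows (stubbed steps, invented names, editor's illustration only):

/* Illustrative sketch only - not part of the patch. */
#include <stdio.h>

#define FLAG_MAC_VLAN 0x1
#define FLAG_RX_MASK  0x2
#define FLAG_MCAST    0x4

static int set_mac_vlan(void)  { puts("mac/vlan"); return 0; }
static int set_rx_mode(void)   { puts("rx-mode");  return 0; }
static int set_multicast(void) { puts("mcast");    return 0; }

/* One pass, each step only if requested, first failure wins. */
static int qfilters(unsigned flags)
{
    int rc = 0;

    if (flags & FLAG_MAC_VLAN) {
        rc = set_mac_vlan();
        if (rc)
            goto out;
    }
    if (flags & FLAG_RX_MASK) {
        rc = set_rx_mode();
        if (rc)
            goto out;
    }
    if (flags & FLAG_MCAST)
        rc = set_multicast();
out:
    if (rc)
        fprintf(stderr, "qfilters error: rc %d\n", rc);
    return rc;
}

int main(void)
{
    return qfilters(FLAG_MAC_VLAN | FLAG_RX_MASK | FLAG_MCAST);
}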
@@ -1710,7 +1631,6 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
        if (filters->n_mac_vlan_filters > 1) {
            BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
                      vf->abs_vfid);
            vf->op_rc = -EPERM;
            rc = -EPERM;
            goto response;
        }
@@ -1721,7 +1641,6 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
            BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
                      vf->abs_vfid);

            vf->op_rc = -EPERM;
            rc = -EPERM;
            goto response;
        }
@@ -1748,7 +1667,6 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
            VFPF_Q_FILTER_VLAN_TAG_VALID) {
            BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
                      vf->abs_vfid);
            vf->op_rc = -EPERM;
            rc = -EPERM;
            goto response;
        }
@@ -1770,15 +1688,14 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_vf_mbx *mbx)
{
    struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
    struct bnx2x_vfop_cmd cmd = {
        .done = bnx2x_vf_mbx_resp,
        .block = false,
    };
    int rc;

    if (bnx2x_filters_validate_mac(bp, vf, filters))
    rc = bnx2x_filters_validate_mac(bp, vf, filters);
    if (rc)
        goto response;

    if (bnx2x_filters_validate_vlan(bp, vf, filters))
    rc = bnx2x_filters_validate_vlan(bp, vf, filters);
    if (rc)
        goto response;

    DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1788,125 +1705,105 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
    /* print q_filter message */
    bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

    vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
    if (vf->op_rc)
        goto response;
    return;

    rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
    bnx2x_vf_mbx_resp(bp, vf);
    bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
    int qid = mbx->msg->req.q_op.vf_qid;
    struct bnx2x_vfop_cmd cmd = {
        .done = bnx2x_vf_mbx_resp,
        .block = false,
    };
    int rc;

    DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
       vf->abs_vfid, qid);

    vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
    if (vf->op_rc)
        bnx2x_vf_mbx_resp(bp, vf);
    rc = bnx2x_vf_queue_teardown(bp, vf, qid);
    bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mbx *mbx)
{
    struct bnx2x_vfop_cmd cmd = {
        .done = bnx2x_vf_mbx_resp,
        .block = false,
    };
    int rc;

    DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

    vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
    if (vf->op_rc)
        bnx2x_vf_mbx_resp(bp, vf);
    rc = bnx2x_vf_close(bp, vf);
    bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
    struct bnx2x_vfop_cmd cmd = {
        .done = bnx2x_vf_mbx_resp,
        .block = false,
    };
    int rc;

    DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

    vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
    if (vf->op_rc)
        bnx2x_vf_mbx_resp(bp, vf);
    rc = bnx2x_vf_free(bp, vf);
    bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
    struct bnx2x_vfop_cmd cmd = {
        .done = bnx2x_vf_mbx_resp,
        .block = false,
    };
    struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
    struct bnx2x_config_rss_params rss;
    struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
    int rc = 0;

    if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
        rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
        BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
                  vf->index);
        vf->op_rc = -EINVAL;
        rc = -EINVAL;
        goto mbx_resp;
    }

    memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));

    /* set vfop params according to rss tlv */
    memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
    memcpy(rss.ind_table, rss_tlv->ind_table,
           T_ETH_INDIRECTION_TABLE_SIZE);
    memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
           sizeof(rss_tlv->rss_key));
    vf_op_params->rss_obj = &vf->rss_conf_obj;
    vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
    memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
    rss.rss_obj = &vf->rss_conf_obj;
    rss.rss_result_mask = rss_tlv->rss_result_mask;

    /* flags handled individually for backward/forward compatability */
    vf_op_params->rss_flags = 0;
    vf_op_params->ramrod_flags = 0;
    rss.rss_flags = 0;
    rss.ramrod_flags = 0;

    if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
        __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
        __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
    if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
        __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
        __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
    if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
        __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
        __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
    if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
        __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
        __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
    if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
        __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
        __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
    if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
        __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
        __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
    if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
        __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
        __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
    if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
        __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
        __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
    if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
        __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
        __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);

    if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
         rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
        (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
         rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
        BNX2X_ERR("about to hit a FW assert. aborting...\n");
        vf->op_rc = -EINVAL;
        rc = -EINVAL;
        goto mbx_resp;
    }

    vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);

    rc = bnx2x_vf_rss_update(bp, vf, &rss);
mbx_resp:
    if (vf->op_rc)
        bnx2x_vf_mbx_resp(bp, vf);
    bnx2x_vf_mbx_resp(bp, vf, rc);
}

static int bnx2x_validate_tpa_params(struct bnx2x *bp,
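In the RSS hunk above, the ramrod parameters move from the per-VF op_params union to a stack-local struct that is zeroed, filled from the request and passed straight to the worker, which simply returns a status. A standalone sketch of that pattern, including bit-by-bit translation of request flags (invented names, editor's illustration only):

/* Illustrative sketch only - not part of the patch. */
#include <stdio.h>
#include <string.h>

#define REQ_IPV4 0x1
#define REQ_IPV6 0x2

struct rss_params {
    unsigned long flags;
    unsigned char key[40];
};

static int rss_update(const struct rss_params *p)
{
    printf("configuring rss, flags 0x%lx\n", p->flags);
    return 0;
}

static int handle_request(unsigned req_flags, const unsigned char *req_key)
{
    struct rss_params rss;          /* stack-local, no shared op_params */

    memset(&rss, 0, sizeof(rss));
    memcpy(rss.key, req_key, sizeof(rss.key));

    /* translate request flags individually for compatibility */
    if (req_flags & REQ_IPV4)
        rss.flags |= 0x10;
    if (req_flags & REQ_IPV6)
        rss.flags |= 0x20;

    return rss_update(&rss);        /* result is simply returned */
}

int main(void)
{
    unsigned char key[40] = { 0 };

    return handle_request(REQ_IPV4 | REQ_IPV6, key);
}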
@@ -1935,47 +1832,42 @@ static int bnx2x_validate_tpa_params(struct bnx2x *bp,
static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
    struct bnx2x_vfop_cmd cmd = {
        .done = bnx2x_vf_mbx_resp,
        .block = false,
    };
    struct bnx2x_queue_update_tpa_params *vf_op_params =
        &vf->op_params.qstate.params.update_tpa;
    struct bnx2x_queue_update_tpa_params vf_op_params;
    struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
    int rc = 0;

    memset(vf_op_params, 0, sizeof(*vf_op_params));
    memset(&vf_op_params, 0, sizeof(vf_op_params));

    if (bnx2x_validate_tpa_params(bp, tpa_tlv))
        goto mbx_resp;

    vf_op_params->complete_on_both_clients =
    vf_op_params.complete_on_both_clients =
        tpa_tlv->tpa_client_info.complete_on_both_clients;
    vf_op_params->dont_verify_thr =
    vf_op_params.dont_verify_thr =
        tpa_tlv->tpa_client_info.dont_verify_thr;
    vf_op_params->max_agg_sz =
    vf_op_params.max_agg_sz =
        tpa_tlv->tpa_client_info.max_agg_size;
    vf_op_params->max_sges_pkt =
    vf_op_params.max_sges_pkt =
        tpa_tlv->tpa_client_info.max_sges_for_packet;
    vf_op_params->max_tpa_queues =
    vf_op_params.max_tpa_queues =
        tpa_tlv->tpa_client_info.max_tpa_queues;
    vf_op_params->sge_buff_sz =
    vf_op_params.sge_buff_sz =
        tpa_tlv->tpa_client_info.sge_buff_size;
    vf_op_params->sge_pause_thr_high =
    vf_op_params.sge_pause_thr_high =
        tpa_tlv->tpa_client_info.sge_pause_thr_high;
    vf_op_params->sge_pause_thr_low =
    vf_op_params.sge_pause_thr_low =
        tpa_tlv->tpa_client_info.sge_pause_thr_low;
    vf_op_params->tpa_mode =
    vf_op_params.tpa_mode =
        tpa_tlv->tpa_client_info.tpa_mode;
    vf_op_params->update_ipv4 =
    vf_op_params.update_ipv4 =
        tpa_tlv->tpa_client_info.update_ipv4;
    vf_op_params->update_ipv6 =
    vf_op_params.update_ipv6 =
        tpa_tlv->tpa_client_info.update_ipv6;

    vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
    rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);

mbx_resp:
    if (vf->op_rc)
        bnx2x_vf_mbx_resp(bp, vf);
    bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* dispatch request */
@@ -2039,11 +1931,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,

    /* can we respond to VF (do we have an address for it?) */
    if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
        /* mbx_resp uses the op_rc of the VF */
        vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;

        /* notify the VF that we do not support this request */
        bnx2x_vf_mbx_resp(bp, vf);
        bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
    } else {
        /* can't send a response since this VF is unknown to us
         * just ack the FW to release the mailbox and unlock
@@ -2123,7 +2012,7 @@ void bnx2x_vf_mbx(struct bnx2x *bp)
        if (rc) {
            BNX2X_ERR("Failed to copy request VF %d\n",
                      vf->abs_vfid);
            bnx2x_vf_release(bp, vf, false); /* non blocking */
            bnx2x_vf_release(bp, vf);
            return;
        }