Merge branch 'qed-next'
Yuval Mintz says:

====================
qed: IOV enhancements and fixups

This is a follow-up on the recent patch series that adds SR-IOV support
to qed. All content here is iov-related fixups [nothing terminal] and
enhancements.

Please consider applying this series to `net-next'.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit a2a658a303
@@ -2805,20 +2805,13 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
 }

 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
-                            struct qed_ptt *p_ptt,
-                            u32 sb_id,
-                            bool cleanup_set,
-                            u16 opaque_fid
-                            )
+static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+                                   struct qed_ptt *p_ptt,
+                                   u32 sb_id, bool cleanup_set, u16 opaque_fid)
 {
+        u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
         u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
         u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
-        u32 data = 0;
-        u32 cmd_ctrl = 0;
-        u32 val = 0;
-        u32 sb_bit = 0;
-        u32 sb_bit_addr = 0;

         /* Set the data field */
         SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
@@ -2863,11 +2856,9 @@ void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,

 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
-                                     u32 sb_id,
-                                     u16 opaque,
-                                     bool b_set)
+                                     u32 sb_id, u16 opaque, bool b_set)
 {
-        int pi;
+        int pi, i;

         /* Set */
         if (b_set)
@@ -2876,6 +2867,22 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
         /* Clear */
         qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);

+        /* Wait for the IGU SB to cleanup */
+        for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+                u32 val;
+
+                val = qed_rd(p_hwfn, p_ptt,
+                             IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4));
+                if (val & (1 << (sb_id % 32)))
+                        usleep_range(10, 20);
+                else
+                        break;
+        }
+        if (i == IGU_CLEANUP_SLEEP_LENGTH)
+                DP_NOTICE(p_hwfn,
+                          "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+                          sb_id);
+
         /* Clear the CAU for the SB */
         for (pi = 0; pi < 12; pi++)
                 qed_wr(p_hwfn, p_ptt,
@@ -2884,13 +2891,11 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,

 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt,
-                              bool b_set,
-                              bool b_slowpath)
+                              bool b_set, bool b_slowpath)
 {
         u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
         u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
-        u32 sb_id = 0;
-        u32 val = 0;
+        u32 sb_id = 0, val = 0;

         val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
         val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
@@ -2906,14 +2911,14 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
                                                 p_hwfn->hw_info.opaque_fid,
                                                 b_set);

-        if (b_slowpath) {
-                sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
-                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
-                           "IGU cleaning slowpath SB [%d]\n", sb_id);
-                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
-                                                p_hwfn->hw_info.opaque_fid,
-                                                b_set);
-        }
+        if (!b_slowpath)
+                return;
+
+        sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                   "IGU cleaning slowpath SB [%d]\n", sb_id);
+        qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+                                        p_hwfn->hw_info.opaque_fid, b_set);
 }

 static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
@@ -291,24 +291,6 @@ int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
  */
 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);

-/**
- * @brief Status block cleanup. Should be called for each status
- * block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param sb_id       - igu status block id
- * @param cleanup_set - set(1) / clear(0)
- * @param opaque_fid  - the function for which to perform
- *                      cleanup, for example a PF on behalf of
- *                      its VFs.
- */
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
-                            struct qed_ptt *p_ptt,
-                            u32 sb_id,
-                            bool cleanup_set,
-                            u16 opaque_fid);
-
 /**
  * @brief Status block cleanup. Should be called for each status
  * block that will be used -> both PF / VF
@@ -317,7 +299,7 @@ void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
  * @param p_ptt
  * @param sb_id       - igu status block id
  * @param opaque      - opaque fid of the sb owner.
- * @param cleanup_set - set(1) / clear(0)
+ * @param b_set       - set(1) / clear(0)
  */
 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
@@ -158,7 +158,7 @@ static int qed_init_pci(struct qed_dev *cdev,
         }

         cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
-        if (cdev->pci_params.pm_cap == 0)
+        if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
                 DP_NOTICE(cdev, "Cannot find power management capability\n");

         rc = qed_set_coherency_mask(cdev);
@@ -429,6 +429,8 @@
         0x184000UL
 #define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
         0x180408UL
+#define IGU_REG_WRITE_DONE_PENDING \
+        0x180900UL
 #define MISCS_REG_GENERIC_POR_0 \
         0x0096d4UL
 #define MCP_REG_NVM_CFG4 \
@@ -476,12 +476,12 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
 static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
 {
         /* Check PF supports sriov */
-        if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+        if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
+            !IS_PF_SRIOV_ALLOC(p_hwfn))
                 return false;

         /* Check VF validity */
-        if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
-            !IS_PF_SRIOV_ALLOC(p_hwfn))
+        if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
                 return false;

         return true;
@@ -526,7 +526,6 @@ static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
 static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt, struct qed_vf_info *vf)
 {
-        u16 igu_sb_id;
         int i;

         /* Set VF masks and configuration - pretend */
@@ -534,23 +533,14 @@ static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,

         qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

-        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                   "value in VF_CONFIGURATION of vf %d after write %x\n",
-                   vf->abs_vf_id,
-                   qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));
-
         /* unpretend */
         qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

         /* iterate over all queues, clear sb consumer */
-        for (i = 0; i < vf->num_sbs; i++) {
-                igu_sb_id = vf->igu_sbs[i];
-                /* Set then clear... */
-                qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
-                                       vf->opaque_fid);
-                qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,
-                                       vf->opaque_fid);
-        }
+        for (i = 0; i < vf->num_sbs; i++)
+                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+                                                vf->igu_sbs[i],
+                                                vf->opaque_fid, true);
 }

 static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
@@ -591,6 +581,8 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,

         qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

+        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
         rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
         if (rc)
                 return rc;
@@ -814,9 +806,51 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
         return rc;
 }

+static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
+                             u16 vfid,
+                             struct qed_mcp_link_params *params,
+                             struct qed_mcp_link_state *link,
+                             struct qed_mcp_link_capabilities *p_caps)
+{
+        struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+                                                       vfid,
+                                                       false);
+        struct qed_bulletin_content *p_bulletin;
+
+        if (!p_vf)
+                return;
+
+        p_bulletin = p_vf->bulletin.p_virt;
+        p_bulletin->req_autoneg = params->speed.autoneg;
+        p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+        p_bulletin->req_forced_speed = params->speed.forced_speed;
+        p_bulletin->req_autoneg_pause = params->pause.autoneg;
+        p_bulletin->req_forced_rx = params->pause.forced_rx;
+        p_bulletin->req_forced_tx = params->pause.forced_tx;
+        p_bulletin->req_loopback = params->loopback_mode;
+
+        p_bulletin->link_up = link->link_up;
+        p_bulletin->speed = link->speed;
+        p_bulletin->full_duplex = link->full_duplex;
+        p_bulletin->autoneg = link->an;
+        p_bulletin->autoneg_complete = link->an_complete;
+        p_bulletin->parallel_detection = link->parallel_detection;
+        p_bulletin->pfc_enabled = link->pfc_enabled;
+        p_bulletin->partner_adv_speed = link->partner_adv_speed;
+        p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+        p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+        p_bulletin->partner_adv_pause = link->partner_adv_pause;
+        p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+        p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
 static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt, u16 rel_vf_id)
 {
+        struct qed_mcp_link_capabilities caps;
+        struct qed_mcp_link_params params;
+        struct qed_mcp_link_state link;
         struct qed_vf_info *vf = NULL;
         int rc = 0;

@@ -831,6 +865,15 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,

         memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

+        /* Get the link configuration back in bulletin so
+         * that when VFs are re-enabled they get the actual
+         * link configuration.
+         */
+        memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
+        memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
+        memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
+        qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
         if (vf->state != VF_STOPPED) {
                 /* Stopping the VF */
                 rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
@@ -2550,45 +2593,6 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
         return found;
 }

-void qed_iov_set_link(struct qed_hwfn *p_hwfn,
-                      u16 vfid,
-                      struct qed_mcp_link_params *params,
-                      struct qed_mcp_link_state *link,
-                      struct qed_mcp_link_capabilities *p_caps)
-{
-        struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
-                                                       vfid,
-                                                       false);
-        struct qed_bulletin_content *p_bulletin;
-
-        if (!p_vf)
-                return;
-
-        p_bulletin = p_vf->bulletin.p_virt;
-        p_bulletin->req_autoneg = params->speed.autoneg;
-        p_bulletin->req_adv_speed = params->speed.advertised_speeds;
-        p_bulletin->req_forced_speed = params->speed.forced_speed;
-        p_bulletin->req_autoneg_pause = params->pause.autoneg;
-        p_bulletin->req_forced_rx = params->pause.forced_rx;
-        p_bulletin->req_forced_tx = params->pause.forced_tx;
-        p_bulletin->req_loopback = params->loopback_mode;
-
-        p_bulletin->link_up = link->link_up;
-        p_bulletin->speed = link->speed;
-        p_bulletin->full_duplex = link->full_duplex;
-        p_bulletin->autoneg = link->an;
-        p_bulletin->autoneg_complete = link->an_complete;
-        p_bulletin->parallel_detection = link->parallel_detection;
-        p_bulletin->pfc_enabled = link->pfc_enabled;
-        p_bulletin->partner_adv_speed = link->partner_adv_speed;
-        p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
-        p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
-        p_bulletin->partner_adv_pause = link->partner_adv_pause;
-        p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
-
-        p_bulletin->capability_speed = p_caps->speed_capabilities;
-}
-
 static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
                              u16 vfid,
                              struct qed_mcp_link_params *p_params,
@@ -3095,6 +3099,9 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
                         goto err;
                 }

+                if (IS_MF_DEFAULT(hwfn))
+                        limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
+
                 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
                 qed_int_get_num_sbs(hwfn, &sb_cnt_info);
                 num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);