Merge branch 'qed-XDP-support'
Yuval Mintz says:

====================
qed*: Add XDP support

This patch series is intended to add XDP to the qede driver, although it
contains quite a bit of cleanups, refactorings and infrastructure changes
as well. The content of this series can be roughly divided into:

- Datapath improvements - mostly focused on having the datapath utilize
  parameters which can be more tightly contained in cachelines.
  Patches #1, #2, #8, #9 belong to this group.
- Refactoring - done mostly in favour of XDP. Patches #3, #4, #5, #9.
- Infrastructure changes - done in favour of XDP. Patches #6 and #7
  belong to this category [#7 being by far the biggest patch in the
  series].
- Actual XDP support - the last two patches [#10, #11].
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in commit 348bfec21f.
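The central refactoring in this series replaces queue-id based L2 queue APIs with opaque queue-cid handles: start returns a handle in the ret-params, and teardown passes that handle back. A minimal caller-side sketch of the new contract follows (hypothetical locals and surrounding setup; the qed function signatures are taken from the diff below):

	struct qed_rxq_start_ret_params ret_params;
	struct qed_queue_start_common_params params;
	void *rxq_handle;
	int rc;

	/* Queue/vport/SB parameters and the Rx ring addresses are assumed
	 * to be prepared elsewhere; only the handle flow is shown.
	 */
	memset(&ret_params, 0, sizeof(ret_params));
	rc = qed_eth_rx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid,
				    &params, bd_max_bytes,
				    bd_chain_phys_addr, cqe_pbl_addr,
				    cqe_pbl_size, &ret_params);
	if (rc)
		return rc;

	/* On success the caller keeps an opaque handle (plus the Rx
	 * producer address in ret_params.p_prod) instead of a queue-id.
	 */
	rxq_handle = ret_params.p_handle;

	/* Teardown passes the handle back; on success the queue-cid is
	 * released inside qed_eth_rx_queue_stop() itself.
	 */
	rc = qed_eth_rx_queue_stop(p_hwfn, rxq_handle, false, false);

The Tx path mirrors this with qed_eth_tx_queue_start()/qed_eth_tx_queue_stop() and a doorbell address returned through p_ret_params->p_doorbell.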
@@ -241,15 +241,6 @@ struct qed_hw_info {
 	enum qed_wol_support b_wol_support;
 };
 
-struct qed_hw_cid_data {
-	u32 cid;
-	bool b_cid_allocated;
-
-	/* Additional identifiers */
-	u16 opaque_fid;
-	u8 vport_id;
-};
-
 /* maximum size of read/write commands (HW limit) */
 #define DMAE_MAX_RW_SIZE        0x2000
 
@@ -416,9 +407,6 @@ struct qed_hwfn {
 
 	struct qed_dcbx_info *p_dcbx_info;
 
-	struct qed_hw_cid_data *p_tx_cids;
-	struct qed_hw_cid_data *p_rx_cids;
-
 	struct qed_dmae_info dmae_info;
 
 	/* QM init */
@@ -134,15 +134,6 @@ void qed_resc_free(struct qed_dev *cdev)
 
 	kfree(cdev->reset_stats);
 
-	for_each_hwfn(cdev, i) {
-		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-
-		kfree(p_hwfn->p_tx_cids);
-		p_hwfn->p_tx_cids = NULL;
-		kfree(p_hwfn->p_rx_cids);
-		p_hwfn->p_rx_cids = NULL;
-	}
-
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
@@ -425,23 +416,6 @@ int qed_resc_alloc(struct qed_dev *cdev)
 	if (!cdev->fw_data)
 		return -ENOMEM;
 
-	/* Allocate Memory for the Queue->CID mapping */
-	for_each_hwfn(cdev, i) {
-		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
-		int tx_size = sizeof(struct qed_hw_cid_data) *
-				  RESC_NUM(p_hwfn, QED_L2_QUEUE);
-		int rx_size = sizeof(struct qed_hw_cid_data) *
-				  RESC_NUM(p_hwfn, QED_L2_QUEUE);
-
-		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
-		if (!p_hwfn->p_tx_cids)
-			goto alloc_no_mem;
-
-		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
-		if (!p_hwfn->p_rx_cids)
-			goto alloc_no_mem;
-	}
-
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 		u32 n_eqes, num_cons;
 
@@ -2283,12 +2257,12 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
 {
 	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
 	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
-	u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
+	u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
 
 	if (!pp_virt_addr_tbl)
 		return;
 
-	if (!p_chain->pbl.p_virt_table)
+	if (!p_pbl_virt)
 		goto out;
 
 	for (i = 0; i < page_cnt; i++) {
@@ -2306,7 +2280,8 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
 	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
 	dma_free_coherent(&cdev->pdev->dev,
 			  pbl_size,
-			  p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
+			  p_chain->pbl_sp.p_virt_table,
+			  p_chain->pbl_sp.p_phys_table);
 out:
 	vfree(p_chain->pbl.pp_virt_addr_tbl);
 }
@@ -23,6 +23,7 @@
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
 #include <linux/bug.h>
+#include <linux/vmalloc.h>
 #include "qed.h"
 #include <linux/qed/qed_chain.h>
 #include "qed_cxt.h"
@@ -41,6 +42,124 @@
 #define QED_MAX_SGES_NUM 16
 #define CRC32_POLY 0x1edc6f41
 
+void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
+			       struct qed_queue_cid *p_cid)
+{
+	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
+	if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
+		qed_cxt_release_cid(p_hwfn, p_cid->cid);
+	vfree(p_cid);
+}
+
+/* The internal is only meant to be directly called by PFs initializing CIDs
+ * for their VFs.
+ */
+struct qed_queue_cid *
+_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+		      u16 opaque_fid,
+		      u32 cid,
+		      u8 vf_qid,
+		      struct qed_queue_start_common_params *p_params)
+{
+	bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
+	struct qed_queue_cid *p_cid;
+	int rc;
+
+	p_cid = vmalloc(sizeof(*p_cid));
+	if (!p_cid)
+		return NULL;
+	memset(p_cid, 0, sizeof(*p_cid));
+
+	p_cid->opaque_fid = opaque_fid;
+	p_cid->cid = cid;
+	p_cid->vf_qid = vf_qid;
+	p_cid->rel = *p_params;
+
+	/* Don't try calculating the absolute indices for VFs */
+	if (IS_VF(p_hwfn->cdev)) {
+		p_cid->abs = p_cid->rel;
+		goto out;
+	}
+
+	/* Calculate the engine-absolute indices of the resources.
+	 * This would guarantee they're valid later on.
+	 * In some cases [SBs] we already have the right values.
+	 */
+	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+	if (rc)
+		goto fail;
+
+	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
+	if (rc)
+		goto fail;
+
+	/* In case of a PF configuring its VF's queues, the stats-id is already
+	 * absolute [since there's a single index that's suitable per-VF].
+	 */
+	if (b_is_same) {
+		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
+				  &p_cid->abs.stats_id);
+		if (rc)
+			goto fail;
+	} else {
+		p_cid->abs.stats_id = p_cid->rel.stats_id;
+	}
+
+	/* SBs relevant information was already provided as absolute */
+	p_cid->abs.sb = p_cid->rel.sb;
+	p_cid->abs.sb_idx = p_cid->rel.sb_idx;
+
+	/* This is tricky - we're actually interested in whether this is a PF
+	 * entry meant for the VF.
+	 */
+	if (!b_is_same)
+		p_cid->is_vf = true;
+out:
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_SP,
+		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+		   p_cid->opaque_fid,
+		   p_cid->cid,
+		   p_cid->rel.vport_id,
+		   p_cid->abs.vport_id,
+		   p_cid->rel.queue_id,
+		   p_cid->abs.queue_id,
+		   p_cid->rel.stats_id,
+		   p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx);
+
+	return p_cid;
+
+fail:
+	vfree(p_cid);
+	return NULL;
+}
+
+static struct qed_queue_cid *qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+						  u16 opaque_fid, struct
+						  qed_queue_start_common_params
+						  *p_params)
+{
+	struct qed_queue_cid *p_cid;
+	u32 cid = 0;
+
+	/* Get a unique firmware CID for this queue, in case it's a PF.
+	 * VF's don't need a CID as the queue configuration will be done
+	 * by PF.
+	 */
+	if (IS_PF(p_hwfn->cdev)) {
+		if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
+			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+			return NULL;
+		}
+	}
+
+	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
+	if (!p_cid && IS_PF(p_hwfn->cdev))
+		qed_cxt_release_cid(p_hwfn, cid);
+
+	return p_cid;
+}
+
 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
 			   struct qed_sp_vport_start_params *p_params)
 {
@@ -496,61 +615,26 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
 	return 0;
 }
 
-static int qed_sp_release_queue_cid(
-	struct qed_hwfn *p_hwfn,
-	struct qed_hw_cid_data *p_cid_data)
-{
-	if (!p_cid_data->b_cid_allocated)
-		return 0;
-
-	qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
-
-	p_cid_data->b_cid_allocated = false;
-
-	return 0;
-}
-
-int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
-				u16 opaque_fid,
-				u32 cid,
-				struct qed_queue_start_common_params *p_params,
-				u8 stats_id,
-				u16 bd_max_bytes,
-				dma_addr_t bd_chain_phys_addr,
-				dma_addr_t cqe_pbl_addr,
-				u16 cqe_pbl_size, bool b_use_zone_a_prod)
+int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+			     struct qed_queue_cid *p_cid,
+			     u16 bd_max_bytes,
+			     dma_addr_t bd_chain_phys_addr,
+			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
 {
 	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
-	struct qed_hw_cid_data *p_rx_cid;
-	u16 abs_rx_q_id = 0;
-	u8 abs_vport_id = 0;
 	int rc = -EINVAL;
 
-	/* Store information for the stop */
-	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
-	p_rx_cid->cid = cid;
-	p_rx_cid->opaque_fid = opaque_fid;
-	p_rx_cid->vport_id = p_params->vport_id;
-
-	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
-	if (rc)
-		return rc;
-
-	rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
-	if (rc)
-		return rc;
-
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
-		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-		   opaque_fid,
-		   cid, p_params->queue_id, p_params->vport_id, p_params->sb);
+		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+		   p_cid->opaque_fid, p_cid->cid,
+		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb);
 
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
-	init_data.cid = cid;
-	init_data.opaque_fid = opaque_fid;
+	init_data.cid = p_cid->cid;
+	init_data.opaque_fid = p_cid->opaque_fid;
 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 
 	rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -561,11 +645,11 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 
 	p_ramrod = &p_ent->ramrod.rx_queue_start;
 
-	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
-	p_ramrod->sb_index = p_params->sb_idx;
-	p_ramrod->vport_id = abs_vport_id;
-	p_ramrod->stats_counter_id = stats_id;
-	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+	p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
+	p_ramrod->sb_index = p_cid->abs.sb_idx;
+	p_ramrod->vport_id = p_cid->abs.vport_id;
+	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
 	p_ramrod->complete_cqe_flg = 0;
 	p_ramrod->complete_event_flg = 1;
@@ -575,85 +659,85 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 
-	if (p_params->vf_qid || b_use_zone_a_prod) {
-		p_ramrod->vf_rx_prod_index = p_params->vf_qid;
+	if (p_cid->is_vf) {
+		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
 			   "Queue%s is meant for VF rxq[%02x]\n",
-			   b_use_zone_a_prod ? " [legacy]" : "",
-			   p_params->vf_qid);
-		p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+			   !!p_cid->b_legacy_vf ? " [legacy]" : "",
+			   p_cid->vf_qid);
+		p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
 	}
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
 static int
-qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
-			  u16 opaque_fid,
-			  struct qed_queue_start_common_params *p_params,
+qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
+			  struct qed_queue_cid *p_cid,
 			  u16 bd_max_bytes,
 			  dma_addr_t bd_chain_phys_addr,
 			  dma_addr_t cqe_pbl_addr,
 			  u16 cqe_pbl_size, void __iomem **pp_prod)
 {
-	struct qed_hw_cid_data *p_rx_cid;
 	u32 init_prod_val = 0;
-	u16 abs_l2_queue = 0;
-	u8 abs_stats_id = 0;
-	int rc;
-
-	if (IS_VF(p_hwfn->cdev)) {
-		return qed_vf_pf_rxq_start(p_hwfn,
-					   p_params->queue_id,
-					   p_params->sb,
-					   (u8)p_params->sb_idx,
-					   bd_max_bytes,
-					   bd_chain_phys_addr,
-					   cqe_pbl_addr, cqe_pbl_size, pp_prod);
-	}
-
-	rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
-	if (rc)
-		return rc;
-
-	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
-	if (rc)
-		return rc;
 
-	*pp_prod = (u8 __iomem *)p_hwfn->regview +
-		   GTT_BAR0_MAP_REG_MSDM_RAM +
-		   MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
+	*pp_prod = p_hwfn->regview +
+		   GTT_BAR0_MAP_REG_MSDM_RAM +
+		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
 
 	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
 	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
 			  (u32 *)(&init_prod_val));
 
-	/* Allocate a CID for the queue */
-	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
-	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
-	if (rc) {
-		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
-		return rc;
-	}
-	p_rx_cid->b_cid_allocated = true;
+	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
+					bd_max_bytes,
+					bd_chain_phys_addr,
+					cqe_pbl_addr, cqe_pbl_size);
+}
 
-	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
-					 opaque_fid,
-					 p_rx_cid->cid,
-					 p_params,
-					 abs_stats_id,
+static int
+qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+		       u16 opaque_fid,
+		       struct qed_queue_start_common_params *p_params,
 		       u16 bd_max_bytes,
 		       dma_addr_t bd_chain_phys_addr,
 		       dma_addr_t cqe_pbl_addr,
 		       u16 cqe_pbl_size,
+		       struct qed_rxq_start_ret_params *p_ret_params)
 {
+	struct qed_queue_cid *p_cid;
+	int rc;
+
+	/* Allocate a CID for the queue */
+	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+	if (!p_cid)
+		return -ENOMEM;
+
+	if (IS_PF(p_hwfn->cdev)) {
+		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
+					       bd_max_bytes,
+					       bd_chain_phys_addr,
+					       cqe_pbl_addr, cqe_pbl_size,
+					       &p_ret_params->p_prod);
+	} else {
+		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
+					 bd_max_bytes,
+					 bd_chain_phys_addr,
-					 cqe_pbl_addr, cqe_pbl_size, false);
+					 cqe_pbl_addr,
+					 cqe_pbl_size, &p_ret_params->p_prod);
+	}
 
+	/* Provide the caller with a reference to the handler */
 	if (rc)
-		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+		qed_eth_queue_cid_release(p_hwfn, p_cid);
+	else
+		p_ret_params->p_handle = (void *)p_cid;
 
 	return rc;
 }
 
 int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
-				u16 rx_queue_id,
+				void **pp_rxq_handles,
 				u8 num_rxqs,
 				u8 complete_cqe_flg,
 				u8 complete_event_flg,
@@ -663,8 +747,7 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
 	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
-	struct qed_hw_cid_data *p_rx_cid;
-	u16 qid, abs_rx_q_id = 0;
+	struct qed_queue_cid *p_cid;
 	int rc = -EINVAL;
 	u8 i;
 
@@ -673,12 +756,11 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
 	init_data.p_comp_data = p_comp_data;
 
 	for (i = 0; i < num_rxqs; i++) {
-		qid = rx_queue_id + i;
-		p_rx_cid = &p_hwfn->p_rx_cids[qid];
+		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];
 
 		/* Get SPQ entry */
-		init_data.cid = p_rx_cid->cid;
-		init_data.opaque_fid = p_rx_cid->opaque_fid;
+		init_data.cid = p_cid->cid;
+		init_data.opaque_fid = p_cid->opaque_fid;
 
 		rc = qed_sp_init_request(p_hwfn, &p_ent,
 					 ETH_RAMROD_RX_QUEUE_UPDATE,
@@ -687,10 +769,9 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
 			return rc;
 
 		p_ramrod = &p_ent->ramrod.rx_queue_update;
+		p_ramrod->vport_id = p_cid->abs.vport_id;
 
-		qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
-		qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
-		p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
 		p_ramrod->complete_cqe_flg = complete_cqe_flg;
 		p_ramrod->complete_event_flg = complete_event_flg;
 
@@ -702,24 +783,19 @@ int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
-int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
-			     u16 rx_queue_id,
-			     bool eq_completion_only, bool cqe_completion)
+static int
+qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
+			 struct qed_queue_cid *p_cid,
+			 bool b_eq_completion_only, bool b_cqe_completion)
 {
-	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
 	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
-	u16 abs_rx_q_id = 0;
-	int rc = -EINVAL;
+	int rc;
 
-	if (IS_VF(p_hwfn->cdev))
-		return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
-
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
-	init_data.cid = p_rx_cid->cid;
-	init_data.opaque_fid = p_rx_cid->opaque_fid;
+	init_data.cid = p_cid->cid;
+	init_data.opaque_fid = p_cid->opaque_fid;
 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 
 	rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -729,62 +805,53 @@ int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
 		return rc;
 
 	p_ramrod = &p_ent->ramrod.rx_queue_stop;
-
-	qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
-	qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
-	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+	p_ramrod->vport_id = p_cid->abs.vport_id;
+	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
 
 	/* Cleaning the queue requires the completion to arrive there.
 	 * In addition, VFs require the answer to come as eqe to PF.
 	 */
-	p_ramrod->complete_cqe_flg =
-		(!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
-		 !eq_completion_only) || cqe_completion;
-	p_ramrod->complete_event_flg =
-		!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
-		eq_completion_only;
+	p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
+				      !b_eq_completion_only) ||
+				     b_cqe_completion;
+	p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
 
-	rc = qed_spq_post(p_hwfn, p_ent, NULL);
-	if (rc)
-		return rc;
-
-	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
-				u16 opaque_fid,
-				u32 cid,
-				struct qed_queue_start_common_params *p_params,
-				u8 stats_id,
-				dma_addr_t pbl_addr,
-				u16 pbl_size,
-				union qed_qm_pq_params *p_pq_params)
+int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+			  void *p_rxq,
+			  bool eq_completion_only, bool cqe_completion)
 {
+	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
+	int rc = -EINVAL;
+
+	if (IS_PF(p_hwfn->cdev))
+		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+					      eq_completion_only,
+					      cqe_completion);
+	else
+		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
+
+	if (!rc)
+		qed_eth_queue_cid_release(p_hwfn, p_cid);
+	return rc;
+}
+
+int
+qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+			 struct qed_queue_cid *p_cid,
+			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
+{
 	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
-	struct qed_hw_cid_data *p_tx_cid;
-	u16 pq_id, abs_tx_q_id = 0;
 	int rc = -EINVAL;
-	u8 abs_vport_id;
 
-	/* Store information for the stop */
-	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
-	p_tx_cid->cid = cid;
-	p_tx_cid->opaque_fid = opaque_fid;
-
-	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
-	if (rc)
-		return rc;
-
-	rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
-	if (rc)
-		return rc;
-
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
-	init_data.cid = cid;
-	init_data.opaque_fid = opaque_fid;
+	init_data.cid = p_cid->cid;
+	init_data.opaque_fid = p_cid->opaque_fid;
 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 
 	rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -794,96 +861,92 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
 		return rc;
 
 	p_ramrod = &p_ent->ramrod.tx_queue_start;
-	p_ramrod->vport_id = abs_vport_id;
+	p_ramrod->vport_id = p_cid->abs.vport_id;
 
-	p_ramrod->sb_id = cpu_to_le16(p_params->sb);
-	p_ramrod->sb_index = p_params->sb_idx;
-	p_ramrod->stats_counter_id = stats_id;
+	p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
+	p_ramrod->sb_index = p_cid->abs.sb_idx;
+	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
 
-	p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
+	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
+	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);
 
 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
 
-	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
 	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
 static int
-qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
-			  u16 opaque_fid,
-			  struct qed_queue_start_common_params *p_params,
+qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
+			  struct qed_queue_cid *p_cid,
 			  u8 tc,
 			  dma_addr_t pbl_addr,
 			  u16 pbl_size, void __iomem **pp_doorbell)
 {
-	struct qed_hw_cid_data *p_tx_cid;
 	union qed_qm_pq_params pq_params;
-	u8 abs_stats_id = 0;
 	int rc;
 
-	if (IS_VF(p_hwfn->cdev)) {
-		return qed_vf_pf_txq_start(p_hwfn,
-					   p_params->queue_id,
-					   p_params->sb,
-					   p_params->sb_idx,
-					   pbl_addr, pbl_size, pp_doorbell);
-	}
-
-	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
-	if (rc)
-		return rc;
-
-	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
-	memset(p_tx_cid, 0, sizeof(*p_tx_cid));
 	memset(&pq_params, 0, sizeof(pq_params));
 
-	/* Allocate a CID for the queue */
-	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
-	if (rc) {
-		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
+				      pbl_addr, pbl_size,
+				      qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH,
+						    &pq_params));
+	if (rc)
 		return rc;
-	}
-	p_tx_cid->b_cid_allocated = true;
 
-	DP_VERBOSE(p_hwfn, QED_MSG_SP,
-		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-		   opaque_fid, p_tx_cid->cid,
-		   p_params->queue_id, p_params->vport_id, p_params->sb);
+	/* Provide the caller with the necessary return values */
+	*pp_doorbell = p_hwfn->doorbells +
+		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);
 
-	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
-					 opaque_fid,
-					 p_tx_cid->cid,
-					 p_params,
-					 abs_stats_id,
-					 pbl_addr,
-					 pbl_size,
-					 &pq_params);
+	return 0;
+}
 
-	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
-		       qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
+static int
+qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+		       u16 opaque_fid,
+		       struct qed_queue_start_common_params *p_params,
+		       u8 tc,
+		       dma_addr_t pbl_addr,
+		       u16 pbl_size,
+		       struct qed_txq_start_ret_params *p_ret_params)
+{
+	struct qed_queue_cid *p_cid;
+	int rc;
+
+	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+	if (!p_cid)
+		return -EINVAL;
+
+	if (IS_PF(p_hwfn->cdev))
+		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+					       pbl_addr, pbl_size,
+					       &p_ret_params->p_doorbell);
+	else
+		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
+					 pbl_addr, pbl_size,
+					 &p_ret_params->p_doorbell);
 
 	if (rc)
-		qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+		qed_eth_queue_cid_release(p_hwfn, p_cid);
+	else
+		p_ret_params->p_handle = (void *)p_cid;
 
 	return rc;
 }
 
-int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
+static int
+qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
 {
-	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
-	int rc = -EINVAL;
+	int rc;
 
-	if (IS_VF(p_hwfn->cdev))
-		return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
-
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
-	init_data.cid = p_tx_cid->cid;
-	init_data.opaque_fid = p_tx_cid->opaque_fid;
+	init_data.cid = p_cid->cid;
+	init_data.opaque_fid = p_cid->opaque_fid;
 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 
 	rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -892,11 +955,22 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
 	if (rc)
 		return rc;
 
-	rc = qed_spq_post(p_hwfn, p_ent, NULL);
-	if (rc)
-		return rc;
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
 
-	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
+{
+	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
+	int rc;
+
+	if (IS_PF(p_hwfn->cdev))
+		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+	else
+		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);
+
+	if (!rc)
+		qed_eth_queue_cid_release(p_hwfn, p_cid);
+	return rc;
 }
 
 static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
@@ -1880,58 +1954,53 @@ static int qed_update_vport(struct qed_dev *cdev,
 }
 
 static int qed_start_rxq(struct qed_dev *cdev,
-			 struct qed_queue_start_common_params *params,
+			 u8 rss_num,
+			 struct qed_queue_start_common_params *p_params,
 			 u16 bd_max_bytes,
 			 dma_addr_t bd_chain_phys_addr,
 			 dma_addr_t cqe_pbl_addr,
 			 u16 cqe_pbl_size,
-			 void __iomem **pp_prod)
+			 struct qed_rxq_start_ret_params *ret_params)
 {
 	struct qed_hwfn *p_hwfn;
 	int rc, hwfn_index;
 
-	hwfn_index = params->rss_id % cdev->num_hwfns;
+	hwfn_index = rss_num % cdev->num_hwfns;
 	p_hwfn = &cdev->hwfns[hwfn_index];
 
 	/* Fix queue ID in 100g mode */
-	params->queue_id /= cdev->num_hwfns;
-
-	rc = qed_sp_eth_rx_queue_start(p_hwfn,
-				       p_hwfn->hw_info.opaque_fid,
-				       params,
-				       bd_max_bytes,
-				       bd_chain_phys_addr,
-				       cqe_pbl_addr,
-				       cqe_pbl_size,
-				       pp_prod);
+	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
+	p_params->stats_id = p_params->vport_id;
 
+	rc = qed_eth_rx_queue_start(p_hwfn,
+				    p_hwfn->hw_info.opaque_fid,
+				    p_params,
+				    bd_max_bytes,
+				    bd_chain_phys_addr,
+				    cqe_pbl_addr, cqe_pbl_size, ret_params);
 	if (rc) {
-		DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
+		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
 		return rc;
 	}
 
 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
-		   params->queue_id, params->rss_id, params->vport_id,
-		   params->sb);
+		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+		   p_params->queue_id, rss_num, p_params->vport_id,
+		   p_params->sb);
 
 	return 0;
 }
 
-static int qed_stop_rxq(struct qed_dev *cdev,
-			struct qed_stop_rxq_params *params)
+static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
 {
 	int rc, hwfn_index;
 	struct qed_hwfn *p_hwfn;
 
-	hwfn_index = params->rss_id % cdev->num_hwfns;
-	p_hwfn = &cdev->hwfns[hwfn_index];
+	hwfn_index = rss_id % cdev->num_hwfns;
+	p_hwfn = &cdev->hwfns[hwfn_index];
 
-	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
-				      params->rx_queue_id / cdev->num_hwfns,
-				      params->eq_completion_only, false);
+	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
 	if (rc) {
-		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
 		return rc;
 	}
 
@@ -1939,26 +2008,24 @@ static int qed_stop_rxq(struct qed_dev *cdev,
 }
 
 static int qed_start_txq(struct qed_dev *cdev,
+			 u8 rss_num,
 			 struct qed_queue_start_common_params *p_params,
 			 dma_addr_t pbl_addr,
 			 u16 pbl_size,
-			 void __iomem **pp_doorbell)
+			 struct qed_txq_start_ret_params *ret_params)
 {
 	struct qed_hwfn *p_hwfn;
 	int rc, hwfn_index;
 
-	hwfn_index = p_params->rss_id % cdev->num_hwfns;
-	p_hwfn = &cdev->hwfns[hwfn_index];
+	hwfn_index = rss_num % cdev->num_hwfns;
+	p_hwfn = &cdev->hwfns[hwfn_index];
+	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
+	p_params->stats_id = p_params->vport_id;
 
-	/* Fix queue ID in 100g mode */
-	p_params->queue_id /= cdev->num_hwfns;
-
-	rc = qed_sp_eth_tx_queue_start(p_hwfn,
-				       p_hwfn->hw_info.opaque_fid,
-				       p_params,
-				       pbl_addr,
-				       pbl_size,
-				       pp_doorbell);
+	rc = qed_eth_tx_queue_start(p_hwfn,
+				    p_hwfn->hw_info.opaque_fid,
+				    p_params, 0,
+				    pbl_addr, pbl_size, ret_params);
 
 	if (rc) {
 		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
@@ -1966,8 +2033,8 @@ static int qed_start_txq(struct qed_dev *cdev,
 	}
 
 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
-		   p_params->queue_id, p_params->rss_id, p_params->vport_id,
+		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+		   p_params->queue_id, rss_num, p_params->vport_id,
 		   p_params->sb);
 
 	return 0;
@@ -1981,19 +2048,17 @@ static int qed_fastpath_stop(struct qed_dev *cdev)
 	return 0;
 }
 
-static int qed_stop_txq(struct qed_dev *cdev,
-			struct qed_stop_txq_params *params)
+static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
 {
 	struct qed_hwfn *p_hwfn;
 	int rc, hwfn_index;
 
-	hwfn_index = params->rss_id % cdev->num_hwfns;
-	p_hwfn = &cdev->hwfns[hwfn_index];
+	hwfn_index = rss_id % cdev->num_hwfns;
+	p_hwfn = &cdev->hwfns[hwfn_index];
 
-	rc = qed_sp_eth_tx_queue_stop(p_hwfn,
-				      params->tx_queue_id / cdev->num_hwfns);
+	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
 	if (rc) {
-		DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
 		return rc;
 	}
 
@@ -78,11 +78,34 @@ struct qed_filter_mcast {
 	unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
 };
 
-int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
-			     u16 rx_queue_id,
-			     bool eq_completion_only, bool cqe_completion);
+/**
+ * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
+ *
+ * @param p_hwfn
+ * @param p_rxq			Handler of queue to close
+ * @param eq_completion_only	If True completion will be on
+ *				EQe, if False completion will be
+ *				on EQe if p_hwfn opaque
+ *				different from the RXQ opaque
+ *				otherwise on CQe.
+ * @param cqe_completion	If True completion will be
+ *				received on CQe.
+ * @return int
+ */
+int
+qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+		      void *p_rxq,
+		      bool eq_completion_only, bool cqe_completion);
 
-int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
+/**
+ * @brief qed_eth_tx_queue_stop - closes a Tx queue
+ *
+ * @param p_hwfn
+ * @param p_txq - handle to Tx queue needed to be closed
+ *
+ * @return int
+ */
+int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
 
 enum qed_tpa_mode {
 	QED_TPA_MODE_NONE,
@@ -196,19 +219,19 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
 * @note At the moment - only used by non-linux VFs.
 *
 * @param p_hwfn
-* @param rx_queue_id		RX Queue ID
-* @param num_rxqs		Allow to update multiple rx
-*				queues, from rx_queue_id to
-*				(rx_queue_id + num_rxqs)
+* @param pp_rxq_handlers	An array of queue handlers to be updated.
+* @param num_rxqs		number of queues to update.
 * @param complete_cqe_flg	Post completion to the CQE Ring if set
 * @param complete_event_flg	Post completion to the Event Ring if set
 * @param comp_mode
 * @param p_comp_data
 *
 * @return int
 */
 
 int
 qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
-			    u16 rx_queue_id,
+			    void **pp_rxq_handlers,
 			    u8 num_rxqs,
 			    u8 complete_cqe_flg,
 			    u8 complete_event_flg,
@@ -217,27 +240,79 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
 
 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
 
-int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
-			   struct qed_sp_vport_start_params *p_params);
 void qed_reset_vport_stats(struct qed_dev *cdev);
 
-int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
-				u16 opaque_fid,
-				u32 cid,
-				struct qed_queue_start_common_params *params,
-				u8 stats_id,
-				u16 bd_max_bytes,
-				dma_addr_t bd_chain_phys_addr,
-				dma_addr_t cqe_pbl_addr,
-				u16 cqe_pbl_size, bool b_use_zone_a_prod);
+struct qed_queue_cid {
+	/* 'Relative' is a relative term ;-). Usually the indices [not counting
+	 * SBs] would be PF-relative, but there are some cases where that isn't
+	 * the case - specifically for a PF configuring its VF indices it's
+	 * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+	 */
+	struct qed_queue_start_common_params rel;
+	struct qed_queue_start_common_params abs;
+	u32 cid;
+	u16 opaque_fid;
 
-int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
-				u16 opaque_fid,
-				u32 cid,
-				struct qed_queue_start_common_params *p_params,
-				u8 stats_id,
-				dma_addr_t pbl_addr,
-				u16 pbl_size,
-				union qed_qm_pq_params *p_pq_params);
+	/* VFs queues are mapped differently, so we need to know the
+	 * relative queue associated with them [0-based].
+	 * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+	 * and not on the VF itself.
+	 */
+	bool is_vf;
+	u8 vf_qid;
+
+	/* Legacy VFs might have Rx producer located elsewhere */
+	bool b_legacy_vf;
+};
+
+void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
+			       struct qed_queue_cid *p_cid);
+
+struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+					    u16 opaque_fid,
+					    u32 cid,
+					    u8 vf_qid,
+					    struct qed_queue_start_common_params
+					    *p_params);
+
+int
+qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+		       struct qed_sp_vport_start_params *p_params);
+
+/**
+ * @brief - Starts an Rx queue, when queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param bd_max_bytes
+ * @param bd_chain_phys_addr
+ * @param cqe_pbl_addr
+ * @param cqe_pbl_size
+ *
+ * @return int
+ */
+int
+qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+			 struct qed_queue_cid *p_cid,
+			 u16 bd_max_bytes,
+			 dma_addr_t bd_chain_phys_addr,
+			 dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+/**
+ * @brief - Starts a Tx queue, where queue_cid is already prepared
+ *
+ * @param p_hwfn
+ * @param p_cid
+ * @param pbl_addr
+ * @param pbl_size
+ * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ *
+ * @return int
+ */
+int
+qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+			 struct qed_queue_cid *p_cid,
+			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id);
 
 u8 qed_mcast_bin_from_mac(u8 *mac);
@@ -347,11 +347,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 
 	/* Place EQ address in RAMROD */
 	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
-		       p_hwfn->p_eq->chain.pbl.p_phys_table);
+		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
 	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
 	p_ramrod->event_ring_num_pages = page_cnt;
 	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
-		       p_hwfn->p_consq->chain.pbl.p_phys_table);
+		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
 
 	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
 
@@ -808,37 +808,70 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
 
 static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
 				  struct qed_ptt *p_ptt,
-				  u16 rel_vf_id, u16 num_rx_queues)
+				  struct qed_iov_vf_init_params *p_params)
 {
 	u8 num_of_vf_avaiable_chains = 0;
 	struct qed_vf_info *vf = NULL;
+	u16 qid, num_irqs;
 	int rc = 0;
 	u32 cids;
 	u8 i;
 
-	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
 	if (!vf) {
 		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
 		return -EINVAL;
 	}
 
 	if (vf->b_init) {
-		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
+		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
+			  p_params->rel_vf_id);
 		return -EINVAL;
 	}
 
+	/* Perform sanity checking on the requested queue_id */
+	for (i = 0; i < p_params->num_queues; i++) {
+		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
+		u16 max_vf_qzone = min_vf_qzone +
+		    FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;
+
+		qid = p_params->req_rx_queue[i];
+		if (qid < min_vf_qzone || qid > max_vf_qzone) {
+			DP_NOTICE(p_hwfn,
+				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+				  qid,
+				  p_params->rel_vf_id,
+				  min_vf_qzone, max_vf_qzone);
+			return -EINVAL;
+		}
+
+		qid = p_params->req_tx_queue[i];
+		if (qid > max_vf_qzone) {
+			DP_NOTICE(p_hwfn,
+				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+				  qid, p_params->rel_vf_id, max_vf_qzone);
+			return -EINVAL;
+		}
+
+		/* If client *really* wants, Tx qid can be shared with PF */
+		if (qid < min_vf_qzone)
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+				   p_params->rel_vf_id, qid, i);
+	}
+
 	/* Limit number of queues according to number of CIDs */
 	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
 	DP_VERBOSE(p_hwfn,
 		   QED_MSG_IOV,
 		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
-		   vf->relative_vf_id, num_rx_queues, (u16) cids);
-	num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
+		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
+	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));
 
 	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
 							     p_ptt,
-							     vf,
-							     num_rx_queues);
+							     vf, num_irqs);
 	if (!num_of_vf_avaiable_chains) {
 		DP_ERR(p_hwfn, "no available igu sbs\n");
 		return -ENOMEM;
@@ -849,25 +882,22 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
 	vf->num_txqs = num_of_vf_avaiable_chains;
 
 	for (i = 0; i < vf->num_rxqs; i++) {
-		u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
-							   vf->igu_sbs[i]);
+		struct qed_vf_q_info *p_queue = &vf->vf_queues[i];
 
-		if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
-			DP_NOTICE(p_hwfn,
-				  "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
-				  vf->relative_vf_id, queue_id);
-			return -EINVAL;
-		}
+		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+		p_queue->fw_tx_qid = p_params->req_tx_queue[i];
 
 		/* CIDs are per-VF, so no problem having them 0-based. */
-		vf->vf_queues[i].fw_rx_qid = queue_id;
-		vf->vf_queues[i].fw_tx_qid = queue_id;
-		vf->vf_queues[i].fw_cid = i;
+		p_queue->fw_cid = i;
 
 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
-			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
+			   vf->relative_vf_id,
+			   i, vf->igu_sbs[i],
+			   p_queue->fw_rx_qid,
+			   p_queue->fw_tx_qid, p_queue->fw_cid);
 	}
 
 	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
 	if (!rc) {
 		vf->b_init = true;
@@ -1187,8 +1217,19 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
 
 	p_vf->num_active_rxqs = 0;
 
-	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
-		p_vf->vf_queues[i].rxq_active = 0;
+	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+		struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];
+
+		if (p_queue->p_rx_cid) {
+			qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
+			p_queue->p_rx_cid = NULL;
+		}
+
+		if (p_queue->p_tx_cid) {
+			qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
+			p_queue->p_tx_cid = NULL;
+		}
+	}
 
 	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
 	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
@@ -1594,21 +1635,21 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
 
 	/* Update all the Rx queues */
 	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
-		u16 qid;
+		struct qed_queue_cid *p_cid;
 
-		if (!p_vf->vf_queues[i].rxq_active)
+		p_cid = p_vf->vf_queues[i].p_rx_cid;
+		if (!p_cid)
 			continue;
 
-		qid = p_vf->vf_queues[i].fw_rx_qid;
-
-		rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
+		rc = qed_sp_eth_rx_queues_update(p_hwfn,
+						 (void **)&p_cid,
 						 1, 0, 1,
 						 QED_SPQ_MODE_EBLOCK,
 						 NULL);
 		if (rc) {
 			DP_NOTICE(p_hwfn,
 				  "Failed to send Rx update fo queue[0x%04x]\n",
-				  qid);
+				  p_cid->rel.queue_id);
 			return rc;
 		}
 	}
@@ -1782,23 +1823,34 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
 	struct qed_queue_start_common_params params;
 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
 	u8 status = PFVF_STATUS_NO_RESOURCE;
+	struct qed_vf_q_info *p_queue;
 	struct vfpf_start_rxq_tlv *req;
 	bool b_legacy_vf = false;
 	int rc;
 
-	memset(&params, 0, sizeof(params));
 	req = &mbx->req_virt->start_rxq;
 
 	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
-	params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
-	params.vf_qid = req->rx_qid;
+	/* Acquire a new queue-cid */
+	p_queue = &vf->vf_queues[req->rx_qid];
+
+	memset(&params, 0, sizeof(params));
+	params.queue_id = p_queue->fw_rx_qid;
 	params.vport_id = vf->vport_id;
+	params.stats_id = vf->abs_vf_id + 0x10;
 	params.sb = req->hw_sb;
 	params.sb_idx = req->sb_index;
 
+	p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
+						  vf->opaque_fid,
+						  p_queue->fw_cid,
+						  req->rx_qid, &params);
+	if (!p_queue->p_rx_cid)
+		goto out;
+
 	/* Legacy VFs have their Producers in a different location, which they
 	 * calculate on their own and clean the producer prior to this.
 	 */
@@ -1811,21 +1863,19 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
 			   MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
 			   0);
 	}
+	p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
 
-	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
-					 vf->vf_queues[req->rx_qid].fw_cid,
-					 &params,
-					 vf->abs_vf_id + 0x10,
-					 req->bd_max_bytes,
-					 req->rxq_addr,
-					 req->cqe_pbl_addr, req->cqe_pbl_size,
-					 b_legacy_vf);
-
+	rc = qed_eth_rxq_start_ramrod(p_hwfn,
+				      p_queue->p_rx_cid,
+				      req->bd_max_bytes,
+				      req->rxq_addr,
+				      req->cqe_pbl_addr, req->cqe_pbl_size);
 	if (rc) {
 		status = PFVF_STATUS_FAILURE;
+		qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
+		p_queue->p_rx_cid = NULL;
 	} else {
 		status = PFVF_STATUS_SUCCESS;
-		vf->vf_queues[req->rx_qid].rxq_active = true;
 		vf->num_active_rxqs++;
 	}
 
@@ -1882,7 +1932,9 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
 	u8 status = PFVF_STATUS_NO_RESOURCE;
 	union qed_qm_pq_params pq_params;
 	struct vfpf_start_txq_tlv *req;
+	struct qed_vf_q_info *p_queue;
 	int rc;
+	u16 pq;
 
 	/* Prepare the parameters which would choose the right PQ */
 	memset(&pq_params, 0, sizeof(pq_params));
@@ -1896,24 +1948,31 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
 	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
 		goto out;
 
-	params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
+	/* Acquire a new queue-cid */
+	p_queue = &vf->vf_queues[req->tx_qid];
+
+	params.queue_id = p_queue->fw_tx_qid;
 	params.vport_id = vf->vport_id;
+	params.stats_id = vf->abs_vf_id + 0x10;
 	params.sb = req->hw_sb;
 	params.sb_idx = req->sb_index;
 
-	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
-					 vf->opaque_fid,
-					 vf->vf_queues[req->tx_qid].fw_cid,
-					 &params,
-					 vf->abs_vf_id + 0x10,
-					 req->pbl_addr,
-					 req->pbl_size, &pq_params);
+	p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
+						  vf->opaque_fid,
+						  p_queue->fw_cid,
+						  req->tx_qid, &params);
+	if (!p_queue->p_tx_cid)
+		goto out;
 
+	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+	rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
+				      req->pbl_addr, req->pbl_size, pq);
 	if (rc) {
 		status = PFVF_STATUS_FAILURE;
+		qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
+		p_queue->p_tx_cid = NULL;
 	} else {
 		status = PFVF_STATUS_SUCCESS;
-		vf->vf_queues[req->tx_qid].txq_active = true;
 	}
 
 out:
@@ -1924,6 +1983,7 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
 				struct qed_vf_info *vf,
 				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
 {
+	struct qed_vf_q_info *p_queue;
 	int rc = 0;
 	int qid;
 
@@ -1931,16 +1991,18 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
 		return -EINVAL;
 
 	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
-		if (vf->vf_queues[qid].rxq_active) {
-			rc = qed_sp_eth_rx_queue_stop(p_hwfn,
-						      vf->vf_queues[qid].
-						      fw_rx_qid, false,
-						      cqe_completion);
+		p_queue = &vf->vf_queues[qid];
 
-			if (rc)
-				return rc;
-		}
-		vf->vf_queues[qid].rxq_active = false;
+		if (!p_queue->p_rx_cid)
+			continue;
+
+		rc = qed_eth_rx_queue_stop(p_hwfn,
+					   p_queue->p_rx_cid,
+					   false, cqe_completion);
+		if (rc)
+			return rc;
+
+		vf->vf_queues[qid].p_rx_cid = NULL;
 		vf->num_active_rxqs--;
 	}
 
@@ -1951,22 +2013,24 @@ static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
 				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
 {
 	int rc = 0;
+	struct qed_vf_q_info *p_queue;
 	int qid;
 
 	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
 		return -EINVAL;
 
 	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
-		if (vf->vf_queues[qid].txq_active) {
-			rc = qed_sp_eth_tx_queue_stop(p_hwfn,
-						      vf->vf_queues[qid].
-						      fw_tx_qid);
+		p_queue = &vf->vf_queues[qid];
+		if (!p_queue->p_tx_cid)
+			continue;
 
-			if (rc)
-				return rc;
-		}
-		vf->vf_queues[qid].txq_active = false;
+		rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
+		if (rc)
+			return rc;
+
+		p_queue->p_tx_cid = NULL;
 	}
 
 	return rc;
 }
 
@@ -2021,10 +2085,11 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
 					struct qed_ptt *p_ptt,
 					struct qed_vf_info *vf)
 {
+	struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
 	u16 length = sizeof(struct pfvf_def_resp_tlv);
 	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
 	struct vfpf_update_rxq_tlv *req;
-	u8 status = PFVF_STATUS_SUCCESS;
+	u8 status = PFVF_STATUS_FAILURE;
 	u8 complete_event_flg;
 	u8 complete_cqe_flg;
 	u16 qid;
@@ -2035,29 +2100,36 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
 	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
 	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
-	for (i = 0; i < req->num_rxqs; i++) {
-		qid = req->rx_qid + i;
-
-		if (!vf->vf_queues[qid].rxq_active) {
-			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n",
-				  qid);
-			status = PFVF_STATUS_FAILURE;
-			break;
-		}
-
-		rc = qed_sp_eth_rx_queues_update(p_hwfn,
-						 vf->vf_queues[qid].fw_rx_qid,
-						 1,
-						 complete_cqe_flg,
-						 complete_event_flg,
-						 QED_SPQ_MODE_EBLOCK, NULL);
-
-		if (rc) {
-			status = PFVF_STATUS_FAILURE;
-			break;
-		}
+	/* Validate inputs */
+	if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF ||
+	    !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
+		DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+			vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+		goto out;
 	}
 
+	for (i = 0; i < req->num_rxqs; i++) {
+		qid = req->rx_qid + i;
+		if (!vf->vf_queues[qid].p_rx_cid) {
+			DP_INFO(p_hwfn,
+				"VF[%d] rx_qid = %d isn`t active!\n",
+				vf->relative_vf_id, qid);
+			goto out;
+		}
+
+		handlers[i] = vf->vf_queues[qid].p_rx_cid;
+	}
+
+	rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+					 req->num_rxqs,
+					 complete_cqe_flg,
+					 complete_event_flg,
+					 QED_SPQ_MODE_EBLOCK, NULL);
+	if (rc)
+		goto out;
+
+	status = PFVF_STATUS_SUCCESS;
+out:
 	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
 			     length, status);
 }
@@ -2268,7 +2340,7 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
 			DP_NOTICE(p_hwfn,
 				  "rss_ind_table[%d] = %d, rxq is out of range\n",
 				  i, q_idx);
-		else if (!vf->vf_queues[q_idx].rxq_active)
+		else if (!vf->vf_queues[q_idx].p_rx_cid)
 			DP_NOTICE(p_hwfn,
 				  "rss_ind_table[%d] = %d, rxq is not active\n",
 				  i, q_idx);
@@ -3468,8 +3540,28 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
 	return 0;
 }
 
+static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
+					u16 vfid,
+					struct qed_iov_vf_init_params *params)
+{
+	u16 base, i;
+
+	/* Since we have an equal resource distribution per-VF, and we assume
+	 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
+	 * sequentially from there.
+	 */
+	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
+
+	params->rel_vf_id = vfid;
+	for (i = 0; i < params->num_queues; i++) {
+		params->req_rx_queue[i] = base + i;
+		params->req_tx_queue[i] = base + i;
+	}
+}
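To illustrate the base computation above with hypothetical numbers (not taken from the patch): if FEAT_NUM(hwfn, QED_PF_L2_QUE) is 64 and each VF requests 4 queues, VF 0 is assigned qzones 64..67 and VF 1 gets 68..71, so the VF queue-zones never overlap the PF's first 64 queues or each other.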
+
 static int qed_sriov_enable(struct qed_dev *cdev, int num)
 {
+	struct qed_iov_vf_init_params params;
 	int i, j, rc;
 
 	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -3478,15 +3570,17 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 		return -EINVAL;
 	}
 
+	memset(&params, 0, sizeof(params));
+
 	/* Initialize HW for VF access */
 	for_each_hwfn(cdev, j) {
 		struct qed_hwfn *hwfn = &cdev->hwfns[j];
 		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
-		int num_queues;
 
 		/* Make sure not to use more than 16 queues per VF */
-		num_queues = min_t(int,
-				   FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, 16);
+		params.num_queues = min_t(int,
+					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
+					  16);
 
 		if (!ptt) {
 			DP_ERR(hwfn, "Failed to acquire ptt\n");
@@ -3498,7 +3592,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
 				continue;
 
-			rc = qed_iov_init_hw_for_vf(hwfn, ptt, i, num_queues);
+			qed_sriov_enable_qid_config(hwfn, i, &params);
+			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
 			if (rc) {
 				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
 				qed_ptt_release(hwfn, ptt);
@@ -58,6 +58,23 @@ struct qed_public_vf_info {
 	int tx_rate;
 };
 
+struct qed_iov_vf_init_params {
+	u16 rel_vf_id;
+
+	/* Number of requested Queues; Currently, don't support different
+	 * number of Rx/Tx queues.
+	 */
+	u16 num_queues;
+
+	/* Allow the client to choose which qzones to use for Rx/Tx,
+	 * and which queue_base to use for Tx queues on a per-queue basis.
+	 * Notice values should be relative to the PF resources.
+	 */
+	u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
+	u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
+};
+
 /* This struct is part of qed_dev and contains data relevant to all hwfns;
  * Initialized only if SR-IOV capability is exposed in PCIe config space.
  */
@@ -99,10 +116,10 @@ struct qed_iov_vf_mbx {
 
 struct qed_vf_q_info {
 	u16 fw_rx_qid;
+	struct qed_queue_cid *p_rx_cid;
 	u16 fw_tx_qid;
+	struct qed_queue_cid *p_tx_cid;
 	u8 fw_cid;
-	u8 rxq_active;
-	u8 txq_active;
 };
 
 enum vf_state {
@ -388,18 +388,18 @@ free_p_iov:
|
|||
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
|
||||
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
|
||||
|
||||
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
|
||||
u8 rx_qid,
|
||||
u16 sb,
|
||||
u8 sb_index,
|
||||
u16 bd_max_bytes,
|
||||
dma_addr_t bd_chain_phys_addr,
|
||||
dma_addr_t cqe_pbl_addr,
|
||||
u16 cqe_pbl_size, void __iomem **pp_prod)
|
||||
int
|
||||
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
|
||||
struct qed_queue_cid *p_cid,
|
||||
u16 bd_max_bytes,
|
||||
dma_addr_t bd_chain_phys_addr,
|
||||
dma_addr_t cqe_pbl_addr,
|
||||
u16 cqe_pbl_size, void __iomem **pp_prod)
|
||||
{
|
||||
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
|
||||
struct pfvf_start_queue_resp_tlv *resp;
|
||||
struct vfpf_start_rxq_tlv *req;
|
||||
u8 rx_qid = p_cid->rel.queue_id;
|
||||
int rc;
|
||||
|
||||
/* clear mailbox and prep first tlv */
@@ -409,21 +409,22 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1;

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (pp_prod && p_iov->b_pre_fp_hsi) {
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview +
			   MSTORM_QZONE_START(p_hwfn->cdev) +
			   hw_qid * MSTORM_QZONE_SIZE;
		*pp_prod = (u8 __iomem *)
		    p_hwfn->regview +
		    MSTORM_QZONE_START(p_hwfn->cdev) +
		    hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
@@ -444,7 +445,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
	}

	/* Learn the address of the producer from the response */
	if (pp_prod && !p_iov->b_pre_fp_hsi) {
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;

@@ -462,7 +463,8 @@ exit:
	return rc;
}
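For orientation, a hedged sketch of how a caller might drive the refactored Rx-queue start; the wrapper function, the bd_max_bytes value and the way p_cid was obtained are assumptions, only the qed_vf_pf_rxq_start() call itself comes from the patch:

/* Hedged sketch: p_cid is assumed to arrive from the L2 code with its
 * relative queue/sb fields already filled in; only those relative
 * fields are consumed by the VF path.
 */
static int example_start_vf_rxq(struct qed_hwfn *p_hwfn,
				struct qed_queue_cid *p_cid,
				dma_addr_t bd_chain_phys_addr,
				dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	void __iomem *p_prod;
	int rc;

	rc = qed_vf_pf_rxq_start(p_hwfn, p_cid, 0x600 /* assumed max bd size */,
				 bd_chain_phys_addr,
				 cqe_pbl_addr, cqe_pbl_size, &p_prod);
	if (rc)
		return rc;

	/* p_prod now points at the Rx producer; for a legacy PF it was
	 * computed locally, otherwise it came from the PF's response.
	 */
	return 0;
}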
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;

@@ -472,7 +474,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = rx_qid;
	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

@@ -496,28 +498,28 @@ exit:
	return rc;
}
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
			u16 tx_queue_id,
			u16 sb,
			u8 sb_index,
			dma_addr_t pbl_addr,
			u16 pbl_size, void __iomem **pp_doorbell)
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	int rc;

	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = tx_queue_id;
	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->hw_sb = p_cid->rel.sb;
	req->sb_index = p_cid->rel.sb_idx;

	/* add list termination tlv */
	qed_add_tlv(p_hwfn, &p_iov->offset,
@@ -533,33 +535,29 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		goto exit;
	}

	if (pp_doorbell) {
		/* Modern PFs provide the actual offsets, while legacy
		 * provided only the queue id.
		 */
		if (!p_iov->b_pre_fp_hsi) {
			*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
				       resp->offset;
		} else {
			u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
			u32 db_addr;
	/* Modern PFs provide the actual offsets, while legacy
	 * provided only the queue id.
	 */
	if (!p_iov->b_pre_fp_hsi) {
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
	} else {
		u8 cid = p_iov->acquire_resp.resc.cid[qid];

			db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
			*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
				       db_addr;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
			   tx_queue_id, *pp_doorbell, resp->offset);
		*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
			       qed_db_addr_vf(cid,
					      DQ_DEMS_LEGACY);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
		   qid, *pp_doorbell, resp->offset);
exit:
	qed_vf_pf_req_end(p_hwfn, rc);

	return rc;
}
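A hedged sketch of the legacy doorbell fallback above, factored out for illustration; the wrapper function is hypothetical:

/* For a pre-fast-path-HSI PF the VF derives the doorbell offset itself
 * from the CID it was assigned at acquisition, instead of using the
 * offset carried in the PF's start-queue response.
 */
static u32 example_legacy_db_offset(struct qed_vf_iov *p_iov, u16 qid)
{
	u8 cid = p_iov->acquire_resp.resc.cid[qid];

	return qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
}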
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;

@@ -569,7 +567,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
	/* clear mailbox and prep first tlv */
	req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = tx_qid;
	req->tx_qid = p_cid->rel.queue_id;
	req->num_txqs = 1;

	/* add list termination tlv */
@@ -666,10 +666,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
/**
 * @brief VF - start the RX Queue by sending a message to the PF
 * @param p_hwfn
 * @param cid - zero based within the VF
 * @param rx_queue_id - zero based within the VF
 * @param sb - VF status block for this queue
 * @param sb_index - Index within the status block
 * @param p_cid - Only relative fields are relevant
 * @param bd_max_bytes - maximum number of bytes per bd
 * @param bd_chain_phys_addr - physical address of bd chain
 * @param cqe_pbl_addr - physical address of pbl

@@ -680,9 +677,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			u8 rx_queue_id,
			u16 sb,
			u8 sb_index,
			struct qed_queue_cid *p_cid,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,

@@ -702,24 +697,23 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
 *
 * @return int
 */
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
			u16 tx_queue_id,
			u16 sb,
			u8 sb_index,
			dma_addr_t pbl_addr,
			u16 pbl_size, void __iomem **pp_doorbell);
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell);
/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param rx_qid
 * @param p_cid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       u16 rx_qid, bool cqe_completion);
		       struct qed_queue_cid *p_cid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF

@@ -729,7 +723,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);

/**
 * @brief VF - send a vport update command

@@ -902,9 +896,7 @@ static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      u8 rx_queue_id,
				      u16 sb,
				      u8 sb_index,
				      struct qed_queue_cid *p_cid,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_adr,
				      dma_addr_t cqe_pbl_addr,

@@ -914,9 +906,7 @@ static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      u16 tx_queue_id,
				      u16 sb,
				      u8 sb_index,
				      struct qed_queue_cid *p_cid,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{

@@ -924,12 +914,14 @@ static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     u16 rx_qid, bool cqe_completion)
				     struct qed_queue_cid *p_cid,
				     bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid)
{
	return -EINVAL;
}
@@ -16,6 +16,7 @@
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
#include <linux/io.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>

@@ -127,10 +128,9 @@ struct qede_dev {

	const struct qed_eth_ops *ops;

	struct qed_dev_eth_info dev_info;
	struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev)	((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev)	((edev)->dev_info.num_queues * \
				 (edev)->dev_info.num_tc)
#define QEDE_MAX_TSS_CNT(edev)	((edev)->dev_info.num_queues)

	struct qede_fastpath *fp_array;
	u8 req_num_tx;
@@ -139,17 +139,9 @@ struct qede_dev {
	u8 fp_num_rx;
	u16 req_queues;
	u16 num_queues;
	u8 num_tc;
#define QEDE_QUEUE_CNT(edev)	((edev)->num_queues)
#define QEDE_RSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_TSS_COUNT(edev)	(((edev)->num_queues - (edev)->fp_num_rx) * \
				 (edev)->num_tc)
#define QEDE_TX_IDX(edev, txqidx)	((edev)->fp_num_rx + (txqidx) % \
					 QEDE_TSS_COUNT(edev))
#define QEDE_TC_IDX(edev, txqidx)	((txqidx) / QEDE_TSS_COUNT(edev))
#define QEDE_TX_QUEUE(edev, txqidx)	\
	(&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\
							(edev), (txqidx))])
#define QEDE_TSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_rx)
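A worked example of the new accounting, with invented numbers:

/* Hypothetical config: num_queues = 8, fp_num_rx = 2 (Rx-only fastpaths),
 * fp_num_tx = 2 (Tx-only fastpaths):
 *
 *   QEDE_QUEUE_CNT(edev) == 8
 *   QEDE_RSS_COUNT(edev) == 8 - 2 == 6   (fastpaths with an Rx ring)
 *   QEDE_TSS_COUNT(edev) == 8 - 2 == 6   (fastpaths with a Tx ring)
 *
 * The num_tc multiplier is gone; each Tx-capable fastpath now owns a
 * single regular Tx queue, plus optionally one XDP-forwarding Tx queue.
 */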
	struct qed_int_info int_info;
	unsigned char primary_mac[ETH_ALEN];

@@ -196,6 +188,8 @@ struct qede_dev {
	bool wol_enabled;

	struct qede_rdma_dev rdma_info;

	struct bpf_prog *xdp_prog;
};

enum QEDE_STATE {
@@ -225,39 +219,67 @@ enum qede_agg_state {
};

struct qede_agg_info {
	struct sw_rx_data replace_buf;
	dma_addr_t replace_buf_mapping;
	struct sw_rx_data start_buf;
	dma_addr_t start_buf_mapping;
	struct eth_fast_path_rx_tpa_start_cqe start_cqe;
	enum qede_agg_state agg_state;
	/* rx_buf is a data buffer that can be placed / consumed from rx bd
	 * chain. It has two purposes: We will preallocate the data buffer
	 * for each aggregation when we open the interface and will place this
	 * buffer on the rx-bd-ring when we receive TPA_START. We don't want
	 * to be in a state where allocation fails, as we can't reuse the
	 * consumer buffer in the rx-chain since FW may still be writing to it
	 * (since header needs to be modified for TPA).
	 * The second purpose is to keep a pointer to the bd buffer during
	 * aggregation.
	 */
	struct sw_rx_data buffer;
	dma_addr_t buffer_mapping;

	struct sk_buff *skb;
	int frag_id;

	/* We need some structs from the start cookie until termination */
	u16 vlan_tag;
	u16 start_cqe_bd_len;
	u8 start_cqe_placement_offset;

	u8 state;
	u8 frag_id;

	u8 tunnel_type;
};
struct qede_rx_queue {
	__le16 *hw_cons_ptr;
	struct sw_rx_data *sw_rx_ring;
	u16 sw_rx_cons;
	u16 sw_rx_prod;
	struct qed_chain rx_bd_ring;
	struct qed_chain rx_comp_ring;
	void __iomem *hw_rxq_prod_addr;
	__le16 *hw_cons_ptr;
	void __iomem *hw_rxq_prod_addr;

	/* Required for the allocation of replacement buffers */
	struct device *dev;

	struct bpf_prog *xdp_prog;

	u16 sw_rx_cons;
	u16 sw_rx_prod;

	u16 num_rx_buffers; /* Slowpath */
	u8 data_direction;
	u8 rxq_id;

	u32 rx_buf_size;
	u32 rx_buf_seg_size;

	u64 rcv_pkts;

	struct sw_rx_data *sw_rx_ring;
	struct qed_chain rx_bd_ring;
	struct qed_chain rx_comp_ring ____cacheline_aligned;

	/* GRO */
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];

	int rx_buf_size;
	unsigned int rx_buf_seg_size;
	u64 rx_hw_errors;
	u64 rx_alloc_errors;
	u64 rx_ip_frags;

	u16 num_rx_buffers;
	u16 rxq_id;
	u64 xdp_no_pass;

	u64 rcv_pkts;
	u64 rx_hw_errors;
	u64 rx_alloc_errors;
	u64 rx_ip_frags;
	void *handle;
};
union db_prod {

@@ -273,20 +295,39 @@ struct sw_tx_bd {
};

struct qede_tx_queue {
	int index; /* Queue index */
	__le16 *hw_cons_ptr;
	struct sw_tx_bd *sw_tx_ring;
	u16 sw_tx_cons;
	u16 sw_tx_prod;
	struct qed_chain tx_pbl;
	void __iomem *doorbell_addr;
	union db_prod tx_db;
	u8 is_xdp;
	bool is_legacy;
	u16 sw_tx_cons;
	u16 sw_tx_prod;
	u16 num_tx_buffers; /* Slowpath only */

	u16 num_tx_buffers;
	u64 xmit_pkts;
	u64 stopped_cnt;
	u64 xmit_pkts;
	u64 stopped_cnt;

	bool is_legacy;
	__le16 *hw_cons_ptr;

	/* Needed for the mapping of packets */
	struct device *dev;

	void __iomem *doorbell_addr;
	union db_prod tx_db;
	int index; /* Slowpath only */
#define QEDE_TXQ_XDP_TO_IDX(edev, txq)	((txq)->index - \
					 QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))

	/* Regular Tx requires skb + metadata for release purpose,
	 * while XDP requires only the pages themselves.
	 */
	union {
		struct sw_tx_bd *skbs;
		struct page **pages;
	} sw_tx_ring;

	struct qed_chain tx_pbl;

	/* Slowpath; Should be kept in end [unless missing padding] */
	void *handle;
};
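A worked example of the XDP index translation above, numbers invented:

/* With QEDE_MAX_TSS_CNT(edev) == 64, an XDP Tx queue whose absolute
 * index is 70 reports itself in statistics as XDP queue 6:
 *
 *   QEDE_TXQ_XDP_TO_IDX(edev, txq) == 70 - 64 == 6
 *   QEDE_TXQ_IDX_TO_XDP(edev, 6)   ==  6 + 64 == 70
 *
 * i.e. XDP queues occupy the index range right after the regular
 * Tx queues, and the two macros convert between the two spaces.
 */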
#define BD_UNMAP_ADDR(bd)	HILO_U64(le32_to_cpu((bd)->addr.hi), \

@@ -303,13 +344,16 @@ struct qede_fastpath {
	struct qede_dev *edev;
#define QEDE_FASTPATH_TX	BIT(0)
#define QEDE_FASTPATH_RX	BIT(1)
#define QEDE_FASTPATH_XDP	BIT(2)
#define QEDE_FASTPATH_COMBINED	(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
	u8 type;
	u8 id;
	u8 xdp_xmit;
	struct napi_struct napi;
	struct qed_sb_info *sb_info;
	struct qede_rx_queue *rxq;
	struct qede_tx_queue *txqs;
	struct qede_tx_queue *txq;
	struct qede_tx_queue *xdp_tx;

#define VEC_NAME_SIZE	(sizeof(((struct net_device *)0)->name) + 8)
	char name[VEC_NAME_SIZE];
@@ -332,8 +376,13 @@ struct qede_fastpath {
#define QEDE_SP_VXLAN_PORT_CONFIG	2
#define QEDE_SP_GENEVE_PORT_CONFIG	3

union qede_reload_args {
	u16 mtu;
struct qede_reload_args {
	void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
	union {
		netdev_features_t features;
		struct bpf_prog *new_prog;
		u16 mtu;
	} u;
};
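A hedged sketch of the reload pattern these fields enable; both example functions are hypothetical, only the qede_reload() signature matches the one declared further below:

/* The caller packs the change into qede_reload_args and qede_reload()
 * invokes ->func() while the device is down, then brings it back up.
 */
static void example_set_features(struct qede_dev *edev,
				 struct qede_reload_args *args)
{
	edev->ndev->features = args->u.features;	/* illustrative only */
}

static void example_reload_features(struct qede_dev *edev,
				    netdev_features_t features)
{
	struct qede_reload_args args;

	args.u.features = features;
	args.func = &example_set_features;
	qede_reload(edev, &args, false);	/* false: qede lock not held */
}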
#ifdef CONFIG_DCB

@@ -342,15 +391,14 @@ void qede_set_dcbnl_ops(struct net_device *ndev);
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
		 void (*func)(struct qede_dev *edev,
			      union qede_reload_args *args),
		 union qede_reload_args *args);
		 struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
void __qede_lock(struct qede_dev *edev);
void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
			     u8 count);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);

#define RX_RING_SIZE_POW	13
@@ -16,13 +16,6 @@
#include <linux/capability.h>
#include "qede.h"

#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
#define QEDE_STAT_STRING(stat_name) (#stat_name)
#define _QEDE_STAT(stat_name, pf_only) \
	 {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
#define QEDE_PF_STAT(stat_name)	_QEDE_STAT(stat_name, true)
#define QEDE_STAT(stat_name)	_QEDE_STAT(stat_name, false)

#define QEDE_RQSTAT_OFFSET(stat_name) \
	 (offsetof(struct qede_rx_queue, stat_name))
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)

@@ -39,12 +32,10 @@ static const struct {
	QEDE_RQSTAT(rx_hw_errors),
	QEDE_RQSTAT(rx_alloc_errors),
	QEDE_RQSTAT(rx_ip_frags),
	QEDE_RQSTAT(xdp_no_pass),
};

#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
	(*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
		   qede_rqstats_arr[(sindex)].offset)))
#define QEDE_TQSTAT_OFFSET(stat_name) \
	(offsetof(struct qede_tx_queue, stat_name))
#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
@@ -59,10 +50,12 @@ static const struct {
	QEDE_TQSTAT(stopped_cnt),
};

#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \
	(*((u64 *)(((void *)(&dev->fp_array[tssid].txqs[tcid])) +\
		   qede_tqstats_arr[(sindex)].offset)))

#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
#define QEDE_STAT_STRING(stat_name) (#stat_name)
#define _QEDE_STAT(stat_name, pf_only) \
	 {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
#define QEDE_PF_STAT(stat_name)	_QEDE_STAT(stat_name, true)
#define QEDE_STAT(stat_name)	_QEDE_STAT(stat_name, false)
static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];

@@ -136,10 +129,6 @@ static const struct {
	QEDE_STAT(coalesced_bytes),
};

#define QEDE_STATS_DATA(dev, index) \
	(*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
		   + qede_stats_arr[(index)].offset)))

#define QEDE_NUM_STATS	ARRAY_SIZE(qede_stats_arr)

enum {
@@ -170,39 +159,60 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
	"Nvram (online)\t\t",
};

static void qede_get_strings_stats_txq(struct qede_dev *edev,
				       struct qede_tx_queue *txq, u8 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
		if (txq->is_xdp)
			sprintf(*buf, "%d [XDP]: %s",
				QEDE_TXQ_XDP_TO_IDX(edev, txq),
				qede_tqstats_arr[i].string);
		else
			sprintf(*buf, "%d: %s", txq->index,
				qede_tqstats_arr[i].string);
		*buf += ETH_GSTRING_LEN;
	}
}

static void qede_get_strings_stats_rxq(struct qede_dev *edev,
				       struct qede_rx_queue *rxq, u8 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
		sprintf(*buf, "%d: %s", rxq->rxq_id,
			qede_rqstats_arr[i].string);
		*buf += ETH_GSTRING_LEN;
	}
}
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
	int i, j, k;
	struct qede_fastpath *fp;
	int i;

	for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		int tc;
	/* Account for queue statistics */
	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		fp = &edev->fp_array[i];

		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
			for (j = 0; j < QEDE_NUM_RQSTATS; j++)
				sprintf(buf + (k + j) * ETH_GSTRING_LEN,
					"%d: %s", i,
					qede_rqstats_arr[j].string);
			k += QEDE_NUM_RQSTATS;
		}
		if (fp->type & QEDE_FASTPATH_RX)
			qede_get_strings_stats_rxq(edev, fp->rxq, &buf);

		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < edev->num_tc; tc++) {
				for (j = 0; j < QEDE_NUM_TQSTATS; j++)
					sprintf(buf + (k + j) *
						ETH_GSTRING_LEN,
						"%d.%d: %s", i, tc,
						qede_tqstats_arr[j].string);
				k += QEDE_NUM_TQSTATS;
			}
		}
		if (fp->type & QEDE_FASTPATH_XDP)
			qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);

		if (fp->type & QEDE_FASTPATH_TX)
			qede_get_strings_stats_txq(edev, fp->txq, &buf);
	}

	for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
	/* Account for non-queue statistics */
	for (i = 0; i < QEDE_NUM_STATS; i++) {
		if (IS_VF(edev) && qede_stats_arr[i].pf_only)
			continue;
		strcpy(buf + (k + j) * ETH_GSTRING_LEN,
		       qede_stats_arr[i].string);
		j++;
		strcpy(buf, qede_stats_arr[i].string);
		buf += ETH_GSTRING_LEN;
	}
}
@@ -228,42 +238,61 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
	}
}

static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
		**buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
		(*buf)++;
	}
}

static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
		**buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
		(*buf)++;
	}
}
static void qede_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);
	int sidx, cnt = 0;
	int qid;
	struct qede_fastpath *fp;
	int i;

	qede_fill_by_demand_stats(edev);

	mutex_lock(&edev->qede_lock);
	/* Need to protect the access to the fastpath array */
	__qede_lock(edev);

	for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) {
		int tc;
	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		fp = &edev->fp_array[i];

		if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) {
			for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++)
				buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid);
		}
		if (fp->type & QEDE_FASTPATH_RX)
			qede_get_ethtool_stats_rxq(fp->rxq, &buf);

		if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < edev->num_tc; tc++) {
				for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++)
					buf[cnt++] = QEDE_TQSTATS_DATA(edev,
								       sidx,
								       qid, tc);
			}
		}
		if (fp->type & QEDE_FASTPATH_XDP)
			qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);

		if (fp->type & QEDE_FASTPATH_TX)
			qede_get_ethtool_stats_txq(fp->txq, &buf);
	}

	for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
		if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
	for (i = 0; i < QEDE_NUM_STATS; i++) {
		if (IS_VF(edev) && qede_stats_arr[i].pf_only)
			continue;
		buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
		*buf = *((u64 *)(((void *)&edev->stats) +
				 qede_stats_arr[i].offset));

		buf++;
	}

	mutex_unlock(&edev->qede_lock);
	__qede_unlock(edev);
}
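A worked example of the buffer layout the per-fastpath loops above produce, for a hypothetical two-fastpath device with both Rx and Tx and an XDP program attached:

/* The string and value buffers are filled strictly in fastpath order,
 * Rx first, then the XDP Tx queue, then the regular Tx queue:
 *
 *   fp0: rxq stats, xdp_tx stats, txq stats
 *   fp1: rxq stats, xdp_tx stats, txq stats
 *   ... followed by the device-global qede_stats_arr entries.
 *
 * qede_get_sset_count() below must account for exactly the same set,
 * which is why it adds the XDP portion only when edev->xdp_prog is set.
 */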
static int qede_get_sset_count(struct net_device *dev, int stringset)

@@ -280,8 +309,18 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
			if (qede_stats_arr[i].pf_only)
				num_stats--;
		}
		return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS +
		       QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc;

		/* Account for the Regular Tx statistics */
		num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;

		/* Account for the Regular Rx statistics */
		num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;

		/* Account for XDP statistics [if needed] */
		if (edev->xdp_prog)
			num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS;
		return num_stats;

	case ETH_SS_PRIV_FLAGS:
		return QEDE_PRI_FLAG_LEN;
	case ETH_SS_TEST:
@@ -352,6 +391,8 @@ static int qede_get_link_ksettings(struct net_device *dev,
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	__qede_lock(edev);

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

@@ -371,6 +412,9 @@ static int qede_get_link_ksettings(struct net_device *dev,
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	__qede_unlock(edev);

	base->port = current_link.port;
	base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
						 AUTONEG_DISABLE;
@@ -679,8 +723,7 @@ static int qede_set_ringparam(struct net_device *dev,
	edev->q_num_rx_buffers = ering->rx_pending;
	edev->q_num_tx_buffers = ering->tx_pending;

	if (netif_running(edev->ndev))
		qede_reload(edev, NULL, NULL);
	qede_reload(edev, NULL, false);

	return 0;
}
@@ -765,29 +808,27 @@ static int qede_get_regs_len(struct net_device *ndev)
	return -EINVAL;
}

static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
static void qede_update_mtu(struct qede_dev *edev,
			    struct qede_reload_args *args)
{
	edev->ndev->mtu = args->mtu;
	edev->ndev->mtu = args->u.mtu;
}

/* Netdevice NDOs */
int qede_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct qede_dev *edev = netdev_priv(ndev);
	union qede_reload_args args;
	struct qede_reload_args args;

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Configuring MTU size of %d\n", new_mtu);

	/* Set the mtu field and re-start the interface if needed */
	args.mtu = new_mtu;
	args.u.mtu = new_mtu;
	args.func = &qede_update_mtu;
	qede_reload(edev, &args, false);

	if (netif_running(edev->ndev))
		qede_reload(edev, &qede_update_mtu, &args);

	qede_update_mtu(edev, &args);

	edev->ops->common->update_mtu(edev->cdev, args.mtu);
	edev->ops->common->update_mtu(edev->cdev, new_mtu);

	return 0;
}
@@ -871,8 +912,7 @@ static int qede_set_channels(struct net_device *dev,
		       sizeof(edev->rss_params.rss_ind_table));
	}

	if (netif_running(dev))
		qede_reload(edev, NULL, NULL);
	qede_reload(edev, NULL, false);

	return 0;
}
@@ -1178,7 +1218,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,

	for_each_queue(i) {
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			txq = edev->fp_array[i].txqs;
			txq = edev->fp_array[i].txq;
			break;
		}
	}

@@ -1190,7 +1230,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring[idx].skb = skb;
	txq->sw_tx_ring.skbs[idx].skb = skb;
	first_bd = qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

@@ -1244,7 +1284,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
	txq->sw_tx_cons++;
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].skb = NULL;

	return 0;
}
@@ -1312,13 +1352,13 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
			break;
		}

		qede_recycle_rx_bd_ring(rxq, edev, 1);
		qede_recycle_rx_bd_ring(rxq, 1);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		break;
	}

	DP_INFO(edev, "Not the transmitted packet\n");
	qede_recycle_rx_bd_ring(rxq, edev, 1);
	qede_recycle_rx_bd_ring(rxq, 1);
	qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
(The diff for one file is not shown here because of its size.)
@@ -56,23 +56,6 @@ struct qed_chain_pbl_u32 {
	u32 cons_page_idx;
};

struct qed_chain_pbl {
	/* Base address of a pre-allocated buffer for pbl */
	dma_addr_t p_phys_table;
	void *p_virt_table;

	/* Table for keeping the virtual addresses of the chain pages,
	 * respectively to the physical addresses in the pbl table.
	 */
	void **pp_virt_addr_tbl;

	/* Index to current used page by producer/consumer */
	union {
		struct qed_chain_pbl_u16 pbl16;
		struct qed_chain_pbl_u32 pbl32;
	} u;
};

struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16 prod_idx;
@@ -86,46 +69,78 @@ struct qed_chain_u32 {
};

struct qed_chain {
	void *p_virt_addr;
	dma_addr_t p_phys_addr;
	void *p_prod_elem;
	void *p_cons_elem;
	/* fastpath portion of the chain - required for commands such
	 * as produce / consume.
	 */
	/* Point to next element to produce/consume */
	void *p_prod_elem;
	void *p_cons_elem;

	enum qed_chain_mode mode;
	enum qed_chain_use_mode intended_use; /* used to produce/consume */
	enum qed_chain_cnt_type cnt_type;
	/* Fastpath portions of the PBL [if exists] */
	struct {
		/* Table for keeping the virtual addresses of the chain pages,
		 * respectively to the physical addresses in the pbl table.
		 */
		void **pp_virt_addr_tbl;

		union {
			struct qed_chain_pbl_u16 u16;
			struct qed_chain_pbl_u32 u32;
		} c;
	} pbl;

	union {
		struct qed_chain_u16 chain16;
		struct qed_chain_u32 chain32;
	} u;

	/* Capacity counts only usable elements */
	u32 capacity;
	u32 page_cnt;

	/* Number of elements - capacity is for usable elements only,
	 * while size will contain total number of elements [for entire chain].
	 */
	u32 capacity;
	u32 size;
	enum qed_chain_mode mode;

	/* Elements information for fast calculations */
	u16 elem_per_page;
	u16 elem_per_page_mask;
	u16 elem_unusable;
	u16 usable_per_page;
	u16 elem_size;
	u16 next_page_mask;
	struct qed_chain_pbl pbl;
	u16 elem_per_page;
	u16 elem_per_page_mask;
	u16 elem_size;
	u16 next_page_mask;
	u16 usable_per_page;
	u8 elem_unusable;

	u8 cnt_type;

	/* Slowpath of the chain - required for initialization and destruction,
	 * but isn't involved in regular functionality.
	 */

	/* Base address of a pre-allocated buffer for pbl */
	struct {
		dma_addr_t p_phys_table;
		void *p_virt_table;
	} pbl_sp;

	/* Address of first page of the chain - the address is required
	 * for fastpath operation [consume/produce] but only for the SINGLE
	 * flavour which isn't considered fastpath [== SPQ].
	 */
	void *p_virt_addr;
	dma_addr_t p_phys_addr;

	/* Total number of elements [for entire chain] */
	u32 size;

	u8 intended_use;
};
#define QED_CHAIN_PBL_ENTRY_SIZE	(8)
#define QED_CHAIN_PAGE_SIZE		(0x1000)
#define ELEMS_PER_PAGE(elem_size)	(QED_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)     \
	((mode == QED_CHAIN_MODE_NEXT_PTR) ?	     \
	 (1 + ((sizeof(struct qed_chain_next) - 1) / \
	       (elem_size))) : 0)
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)	 \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?		 \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
		   (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) -     \
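A worked example of the chain geometry, assuming an 8-byte element and a 16-byte struct qed_chain_next (both values are assumptions):

/* With elem_size = 8 and QED_CHAIN_PAGE_SIZE = 0x1000 in next-ptr mode:
 *
 *   ELEMS_PER_PAGE(8)                 == 4096 / 8 == 512
 *   UNUSABLE_ELEMS_PER_PAGE(8, mode)  == 1 + (16 - 1) / 8 == 2
 *   USABLE_ELEMS_PER_PAGE(8, mode)    == 512 - 2 == 510
 *
 * so capacity = 510 * page_cnt while size = 512 * page_cnt, matching
 * the capacity/size comments in struct qed_chain above.
 */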
@@ -186,7 +201,7 @@ static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
	return p_chain->usable_per_page;
}

static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
	return p_chain->elem_unusable;
}

@@ -198,7 +213,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)

static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
	return p_chain->pbl.p_phys_table;
	return p_chain->pbl_sp.p_phys_table;
}

/**

@@ -214,10 +229,10 @@ static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		p_next = *p_next_elem;

@@ -305,7 +320,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
			p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}

@@ -314,7 +329,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
			p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}

@@ -378,7 +393,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
			p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}

@@ -387,8 +402,8 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
			p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
@@ -429,25 +444,26 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
			p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
			p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
			p_chain->pbl.c.u32.prod_page_idx = reset_val;
			p_chain->pbl.c.u32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
		/* Do nothing */
		break;

	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;

	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
	default:
		/* Do nothing */
		break;
	}
}
|
@ -473,13 +489,13 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
|
|||
p_chain->p_virt_addr = NULL;
|
||||
p_chain->p_phys_addr = 0;
|
||||
p_chain->elem_size = elem_size;
|
||||
p_chain->intended_use = intended_use;
|
||||
p_chain->intended_use = (u8)intended_use;
|
||||
p_chain->mode = mode;
|
||||
p_chain->cnt_type = cnt_type;
|
||||
p_chain->cnt_type = (u8)cnt_type;
|
||||
|
||||
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
|
||||
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
|
||||
p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
|
||||
p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
|
||||
p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
|
||||
p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
|
||||
p_chain->next_page_mask = (p_chain->usable_per_page &
|
||||
p_chain->elem_per_page_mask);
|
||||
|
@ -488,8 +504,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
|
|||
p_chain->capacity = p_chain->usable_per_page * page_cnt;
|
||||
p_chain->size = p_chain->elem_per_page * page_cnt;
|
||||
|
||||
p_chain->pbl.p_phys_table = 0;
|
||||
p_chain->pbl.p_virt_table = NULL;
|
||||
p_chain->pbl_sp.p_phys_table = 0;
|
||||
p_chain->pbl_sp.p_virt_table = NULL;
|
||||
p_chain->pbl.pp_virt_addr_tbl = NULL;
|
||||
}
|
||||
|
||||
|
@@ -530,8 +546,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
					  dma_addr_t p_phys_pbl,
					  void **pp_virt_addr_tbl)
{
	p_chain->pbl.p_phys_table = p_phys_pbl;
	p_chain->pbl.p_virt_table = p_virt_pbl;
	p_chain->pbl_sp.p_phys_table = p_phys_pbl;
	p_chain->pbl_sp.p_virt_table = p_virt_pbl;
	p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
@@ -15,6 +15,29 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>

struct qed_queue_start_common_params {
	/* Should always be relative to entity sending this. */
	u8 vport_id;
	u16 queue_id;

	/* Relative, but relevant only for PFs */
	u8 stats_id;

	/* These are always absolute */
	u16 sb;
	u8 sb_idx;
};

struct qed_rxq_start_ret_params {
	void __iomem *p_prod;
	void *p_handle;
};

struct qed_txq_start_ret_params {
	void __iomem *p_doorbell;
	void *p_handle;
};

struct qed_dev_eth_info {
	struct qed_dev_info common;
@@ -56,18 +79,6 @@ struct qed_start_vport_params {
	bool clear_stats;
};

struct qed_stop_rxq_params {
	u8 rss_id;
	u8 rx_queue_id;
	u8 vport_id;
	bool eq_completion_only;
};

struct qed_stop_txq_params {
	u8 rss_id;
	u8 tx_queue_id;
};

enum qed_filter_rx_mode_type {
	QED_FILTER_RX_MODE_TYPE_REGULAR,
	QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
@@ -112,15 +123,6 @@ struct qed_filter_params {
	union qed_filter_type_params filter;
};

struct qed_queue_start_common_params {
	u8 rss_id;
	u8 queue_id;
	u8 vport_id;
	u16 sb;
	u16 sb_idx;
	u16 vf_qid;
};

struct qed_tunn_params {
	u16 vxlan_port;
	u8 update_vxlan_port;
@@ -220,24 +222,24 @@ struct qed_eth_ops {
			     struct qed_update_vport_params *params);

	int (*q_rx_start)(struct qed_dev *cdev,
			  u8 rss_num,
			  struct qed_queue_start_common_params *params,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size,
			  void __iomem **pp_prod);
			  struct qed_rxq_start_ret_params *ret_params);

	int (*q_rx_stop)(struct qed_dev *cdev,
			 struct qed_stop_rxq_params *params);
	int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);

	int (*q_tx_start)(struct qed_dev *cdev,
			  u8 rss_num,
			  struct qed_queue_start_common_params *params,
			  dma_addr_t pbl_addr,
			  u16 pbl_size,
			  void __iomem **pp_doorbell);
			  struct qed_txq_start_ret_params *ret_params);

	int (*q_tx_stop)(struct qed_dev *cdev,
			 struct qed_stop_txq_params *params);
	int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);

	int (*filter_config)(struct qed_dev *cdev,
			     struct qed_filter_params *params);
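A hedged sketch of the reworked queue-start contract from a client's point of view; the wrapper and the literal field values are hypothetical, only the callback signature is the one declared above:

/* The caller fills only relative identifiers in the common params and
 * gets back an opaque handle plus the producer address; that handle is
 * what q_rx_stop() later consumes instead of a qed_stop_rxq_params.
 */
static int example_start_rxq(struct qed_dev *cdev,
			     const struct qed_eth_ops *ops,
			     u8 rss_num, dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size,
			     void **p_handle, void __iomem **p_prod)
{
	struct qed_queue_start_common_params params = {
		.vport_id = 0,		/* relative to the caller */
		.queue_id = rss_num,	/* relative queue index */
		.stats_id = 0,
		.sb = 0,		/* absolute status-block id, assumed */
		.sb_idx = 0,
	};
	struct qed_rxq_start_ret_params ret_params;
	int rc;

	rc = ops->q_rx_start(cdev, rss_num, &params,
			     0x600 /* assumed bd_max_bytes */,
			     bd_chain_phys_addr, cqe_pbl_addr, cqe_pbl_size,
			     &ret_params);
	if (rc)
		return rc;

	*p_handle = ret_params.p_handle;
	*p_prod = ret_params.p_prod;
	return 0;
}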