ice: Move common functions out of ice_main.c part 4/7
This patch continues the code move out of ice_main.c. The following top level
functions (and related dependency functions) were moved to ice_lib.c:

ice_vsi_alloc_rings
ice_vsi_set_rss_params
ice_vsi_set_num_qs
ice_get_free_slot
ice_vsi_init
ice_vsi_clear_rings
ice_vsi_alloc_arrays

Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent: 5153a18e57
Commit: 28c2a64573
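The mechanical pattern behind each move is the same for every function in the list: the definition leaves ice_main.c, loses its static qualifier in ice_lib.c, and gains a prototype in ice_lib.h so ice_main.c can keep calling it. A minimal sketch of that pattern, using a made-up helper name rather than any of the functions above:

/* Illustrative only: ice_foo_helper is a placeholder, not a real ice function. */

/* before (ice_main.c): file-local definition */
static void ice_foo_helper(struct ice_vsi *vsi);

/* after (ice_lib.c): same body, no static qualifier */
void ice_foo_helper(struct ice_vsi *vsi)
{
	/* ... body unchanged by the move ... */
}

/* after (ice_lib.h): prototype exported to the rest of the driver */
void ice_foo_helper(struct ice_vsi *vsi);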
drivers/net/ethernet/intel/ice/ice_lib.c

@@ -225,6 +225,102 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
	return ret;
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
{
	struct ice_pf *pf = vsi->back;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				     sizeof(struct ice_ring *), GFP_KERNEL);
	if (!vsi->tx_rings)
		goto err_txrings;

	vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				     sizeof(struct ice_ring *), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rxrings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
					      vsi->num_q_vectors,
					      sizeof(struct ice_q_vector *),
					      GFP_KERNEL);
		if (!vsi->q_vectors)
			goto err_vectors;
	}

	return 0;

err_vectors:
	devm_kfree(&pf->pdev->dev, vsi->rx_rings);
err_rxrings:
	devm_kfree(&pf->pdev->dev, vsi->tx_rings);
err_txrings:
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 */
void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	switch (vsi->type) {
	case ICE_VSI_PF:
		vsi->alloc_txq = pf->num_lan_tx;
		vsi->alloc_rxq = pf->num_lan_rx;
		vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_get_free_slot - get the next non-NULL location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
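ice_get_free_slot() above is the only helper here that is generic rather than VSI-specific: it treats its argument as a bare array of pointers and returns either the slot right after the hint, if that slot is empty, or the first NULL entry found by a linear scan. The following standalone program is not driver code; the helper's logic is copied locally and ICE_NO_VSI is given a placeholder value so the example builds outside the kernel, just to show the two search paths:

#include <stdio.h>

#define ICE_NO_VSI 0xffff	/* placeholder "no free slot" sentinel for the demo */

/* local copy of the ice_get_free_slot() logic shown above */
static int get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		next = (i == size) ? ICE_NO_VSI : i;
	}
	return next;
}

int main(void)
{
	int a = 1, b = 2, c = 3;
	int *slots[4] = { &a, NULL, &b, &c };	/* slot 1 is the only free one */
	int *full[2] = { &a, &b };		/* no free slots at all */

	printf("%d\n", get_free_slot(slots, 4, 0));	/* hint path: slot after 0 is free -> 1 */
	printf("%d\n", get_free_slot(slots, 4, 3));	/* scan path: wraps to first NULL -> 1 */
	printf("%#x\n", get_free_slot(full, 2, 0));	/* array full -> ICE_NO_VSI (0xffff) */
	return 0;
}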
@@ -286,6 +382,324 @@ void ice_vsi_put_qs(struct ice_vsi *vsi)
	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSI's should be allocated from shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No Outer tag support outer_tag_flags remains to zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, numq_tc;
	u16 pow = 0, max_rss = 0, qcount;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	bool ena_tc0 = false;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	numq_tc = qcount_rx / vsi->tc_cfg.numtc;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for queue offset for TC0, next 4 bits for no:of
	 * queues allocated to TC0. No:of queues is a power-of-2.
	 *
	 * If TC is not enabled, the queue offset is set to 0, and allocate one
	 * queue, this way, traffic for the given TC will be sent to the default
	 * queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF)
			max_rss = ICE_MAX_LG_RSS_QS;
		else
			max_rss = ICE_MAX_SMALL_RSS_QS;

		qcount = min_t(int, numq_tc, max_rss);
		qcount = min_t(int, qcount, vsi->rss_size);
	} else {
		qcount = numq_tc;
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = order_base_2(qcount);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount = 1;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount = qcount;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
		       ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	vsi->num_txq = qcount_tx;
	vsi->num_rxq = offset;

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
			       ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
int ice_vsi_init(struct ice_vsi *vsi)
{
	struct ice_vsi_ctx ctxt = { 0 };
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int ret = 0;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ctxt.flags = ICE_AQ_VSI_TYPE_PF;
		break;
	default:
		return -ENODEV;
	}

	ice_set_dflt_vsi_ctx(&ctxt);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_set_rss_vsi_ctx(&ctxt, vsi);

	ctxt.info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, &ctxt);

	ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Add VSI failed, err %d\n", ret);
		return -EIO;
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt.info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt.vsi_num;

	return ret;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				vsi->tx_rings[i] = NULL;
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				vsi->rx_rings[i] = NULL;
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	/* Allocate tx_rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		vsi->tx_rings[i] = ring;
	}

	/* Allocate rx_rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_add_mac_to_list - Add a mac address filter entry to the list
 * @vsi: the VSI to be forwarded to
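The densest part of the hunk above is ice_vsi_setup_q_map(): for each enabled TC it packs the first queue index and the rounded-up power-of-2 queue count into one 16-bit tc_mapping word. The standalone sketch below reproduces only that packing; the shift/mask constants are local stand-ins for the driver's ICE_AQ_VSI_TC_Q_OFFSET_* and ICE_AQ_VSI_TC_Q_NUM_* definitions, and order_base_2() is re-implemented so the example builds outside the kernel:

#include <stdio.h>
#include <stdint.h>

#define Q_OFFSET_S 0
#define Q_OFFSET_M (0x7FF << Q_OFFSET_S)	/* assumed: low bits carry the first queue index */
#define Q_NUM_S    11
#define Q_NUM_M    (0xF << Q_NUM_S)		/* assumed: next 4 bits carry log2(queue count) */

/* order_base_2(): smallest p with 2^p >= n (kernel helper, re-implemented here) */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int p = 0;

	while ((1u << p) < n)
		p++;
	return p;
}

int main(void)
{
	uint16_t offset = 16, qcount = 12;
	uint16_t pow = order_base_2(qcount);	/* 12 queues -> rounded up to 2^4, so pow = 4 */
	uint16_t qmap = ((offset << Q_OFFSET_S) & Q_OFFSET_M) |
			((pow << Q_NUM_S) & Q_NUM_M);

	/* this TC starts at queue 16 and owns a power-of-2 sized block of queues */
	printf("qmap = 0x%04x\n", qmap);	/* prints qmap = 0x2010 */
	return 0;
}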
drivers/net/ethernet/intel/ice/ice_lib.h

@@ -6,6 +6,20 @@

#include "ice.h"

int ice_vsi_alloc_rings(struct ice_vsi *vsi);

void ice_vsi_set_rss_params(struct ice_vsi *vsi);

void ice_vsi_set_num_qs(struct ice_vsi *vsi);

int ice_get_free_slot(void *array, int size, int curr);

int ice_vsi_init(struct ice_vsi *vsi);

void ice_vsi_clear_rings(struct ice_vsi *vsi);

int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors);

int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
			const u8 *macaddr);
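With these prototypes in place, the VSI bring-up code left in ice_main.c consumes the helpers through ice_lib.h instead of private definitions. The fragment below is only a rough sketch of such a caller: example_vsi_bringup() is an invented name, not the driver's actual setup function, and the real code interleaves more steps and error unwinding:

#include "ice.h"
#include "ice_lib.h"

static int example_vsi_bringup(struct ice_vsi *vsi)
{
	int err;

	ice_vsi_set_num_qs(vsi);	/* pick queue/descriptor/vector counts */
	ice_vsi_set_rss_params(vsi);	/* size the RSS table for the VSI type */

	err = ice_vsi_alloc_arrays(vsi, true);	/* ring + q_vector pointer arrays */
	if (err)
		return err;

	err = ice_vsi_init(vsi);	/* build the context and issue add_vsi */
	if (err)
		return err;

	return ice_vsi_alloc_rings(vsi);	/* kzalloc the Tx/Rx rings */
}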
drivers/net/ethernet/intel/ice/ice_main.c

@@ -33,6 +33,7 @@ static const struct net_device_ops ice_netdev_ops;
static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
static int ice_vsi_release(struct ice_vsi *vsi);

static void ice_vsi_release_all(struct ice_pf *pf);
static void ice_update_vsi_stats(struct ice_vsi *vsi);
static void ice_update_pf_stats(struct ice_pf *pf);
@@ -112,35 +113,6 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
	}
}

/**
 * ice_get_free_slot - get the next non-NULL location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
 * @netdev: the net device on which the sync is happening
@@ -1210,327 +1182,6 @@ free_q_irqs:
	return err;
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		break;
	}
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, numq_tc;
	u16 pow = 0, max_rss = 0, qcount;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	bool ena_tc0 = false;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	numq_tc = qcount_rx / vsi->tc_cfg.numtc;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for queue offset for TC0, next 4 bits for no:of
	 * queues allocated to TC0. No:of queues is a power-of-2.
	 *
	 * If TC is not enabled, the queue offset is set to 0, and allocate one
	 * queue, this way, traffic for the given TC will be sent to the default
	 * queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF)
			max_rss = ICE_MAX_LG_RSS_QS;
		else
			max_rss = ICE_MAX_SMALL_RSS_QS;

		qcount = min_t(int, numq_tc, max_rss);
		qcount = min_t(int, qcount, vsi->rss_size);
	} else {
		qcount = numq_tc;
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = order_base_2(qcount);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount = 1;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount = qcount;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
		       ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	vsi->num_txq = qcount_tx;
	vsi->num_rxq = offset;

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSI's should be allocated from shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;

	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);

	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No Outer tag support outer_tag_flags remains to zero */
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
			       ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi)
{
	struct ice_vsi_ctx ctxt = { 0 };
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int ret = 0;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ctxt.flags = ICE_AQ_VSI_TYPE_PF;
		break;
	default:
		return -ENODEV;
	}

	ice_set_dflt_vsi_ctx(&ctxt);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_set_rss_vsi_ctx(&ctxt, vsi);

	ctxt.info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, &ctxt);

	ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Add VSI failed, err %d\n", ret);
		return -EIO;
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt.info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt.vsi_num;

	return ret;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				vsi->tx_rings[i] = NULL;
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				vsi->rx_rings[i] = NULL;
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	/* Allocate tx_rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;

		vsi->tx_rings[i] = ring;
	}

	/* Allocate rx_rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_ena_misc_vector - enable the non-queue interrupts
 * @pf: board private structure
@@ -1716,73 +1367,6 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
	}
}

/**
 * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	switch (vsi->type) {
	case ICE_VSI_PF:
		vsi->alloc_txq = pf->num_lan_tx;
		vsi->alloc_rxq = pf->num_lan_rx;
		vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
		break;
	default:
		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
{
	struct ice_pf *pf = vsi->back;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				     sizeof(struct ice_ring *), GFP_KERNEL);
	if (!vsi->tx_rings)
		goto err_txrings;

	vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				     sizeof(struct ice_ring *), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rxrings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
					      vsi->num_q_vectors,
					      sizeof(struct ice_q_vector *),
					      GFP_KERNEL);
		if (!vsi->q_vectors)
			goto err_vectors;
	}

	return 0;

err_vectors:
	devm_kfree(&pf->pdev->dev, vsi->rx_rings);
err_rxrings:
	devm_kfree(&pf->pdev->dev, vsi->tx_rings);
err_txrings:
	return -ENOMEM;
}

/**
 * ice_vsi_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure