net/mlx5e: Support RX multi-packet WQE (Striding RQ)
Introduce the feature of multi-packet WQE (RX Work Queue Element),
referred to as MPWQE or Striding RQ, in which WQEs are larger and serve
multiple packets each.

Every WQE consists of many strides of the same size; every received
packet is aligned to the beginning of a stride and is written to
consecutive strides within a WQE.

In the regular approach, each WQE is big enough to serve one received
packet of any size, up to MTU, or 64KB when device LRO is enabled,
making it very wasteful when dealing with small packets, and especially
so when device LRO is enabled.

Thanks to its flexibility, MPWQE allows better memory utilization
(implying improvements in CPU utilization and packet rate), as packets
consume strides according to their size, preserving the rest of the WQE
for other packets.

MPWQE default configuration:
        Number of WQEs  = 16
        Strides per WQE = 2048
        Stride size     = 64 bytes

The default WQEs memory footprint went from 1024 * MTU (~1.5MB) to
16 * 2048 * 64 = 2MB per ring. However, HW LRO can now be supported at
no additional cost in memory footprint, hence we turn it on by default
and get even better performance.

Performance tested on ConnectX4-Lx 50G.
To isolate the feature under test, the numbers below were measured with
HW LRO turned off. We verified that the performance simply improves
when LRO is turned back on.

* Netperf single TCP stream:
  - BW raised by 10-15% for representative packet sizes:
    default, 64B, 1024B, 1478B, 65536B.

* Netperf multi TCP stream:
  - No degradation, line rate reached.

* Pktgen: packet rate raised by 2-10% for traffic of different message
  sizes: 64B, 128B, 256B, 1024B, and 1500B.

* Pktgen: packet loss in bursts of small messages (64B), single stream:
  - | num packets | packet loss before | packet loss after |
    |     2K      |        ~ 1K        |         0         |
    |     8K      |        ~ 6K        |         0         |
    |    16K      |        ~13K        |         0         |
    |    32K      |        ~28K        |         0         |
    |    64K      |        ~57K        |       ~24K        |

  This is expected, as the driver can now receive as many small packets
  (<= 64B) as the total number of strides in the ring (default =
  2048 * 16 = 32K), vs. 1024 (the default ring size, regardless of
  packet size) before this feature.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Achiad Shochat <achiad@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 2f48af128d
Commit: 461017cb00
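As a quick sanity check of the sizing math quoted above, the defaults can
be plugged into a few lines of C. This is an illustration only, not driver
code; the constant names below simply mirror the values in the message.

  #include <stdio.h>

  /* Illustrative only: defaults quoted in the commit message. */
  #define NUM_WQES        16
  #define STRIDES_PER_WQE 2048
  #define STRIDE_SIZE     64      /* bytes */

  int main(void)
  {
          unsigned long footprint = (unsigned long)NUM_WQES * STRIDES_PER_WQE * STRIDE_SIZE;
          unsigned long capacity  = (unsigned long)NUM_WQES * STRIDES_PER_WQE;

          /* 16 * 2048 * 64 = 2097152 bytes = 2MB per ring */
          printf("RX ring footprint: %lu bytes (%lu MB)\n", footprint, footprint >> 20);
          /* Up to one small (<= 64B) packet per stride: 32768 packets per ring */
          printf("Small-packet capacity: %lu packets\n", capacity);
          return 0;
  }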
@@ -57,12 +57,30 @@
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
 
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
+
+#define MLX5_MPWRQ_LOG_NUM_STRIDES 11 /* >= 9, HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_NUM_STRIDES BIT(MLX5_MPWRQ_LOG_NUM_STRIDES)
+#define MLX5_MPWRQ_STRIDE_SIZE BIT(MLX5_MPWRQ_LOG_STRIDE_SIZE)
+#define MLX5_MPWRQ_LOG_WQE_SZ (MLX5_MPWRQ_LOG_NUM_STRIDES +\
+                               MLX5_MPWRQ_LOG_STRIDE_SIZE)
+#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
+                                   MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
+#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
+                                     MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
 
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
 
 #define MLX5E_LOG_INDIR_RQT_SIZE 0x7
 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
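For reference, the values derived from the MLX5_MPWRQ_* macros above can be
reproduced with a small stand-alone program. This is an illustration only:
it assumes 4KB pages (PAGE_SHIFT = 12) and open-codes BIT(), since it builds
outside the kernel.

  #include <stdio.h>

  #define BIT(n)  (1UL << (n))
  #define PAGE_SHIFT 12   /* assumption: 4KB pages */

  #define MLX5_MPWRQ_LOG_NUM_STRIDES  11
  #define MLX5_MPWRQ_LOG_STRIDE_SIZE  6
  #define MLX5_MPWRQ_NUM_STRIDES      BIT(MLX5_MPWRQ_LOG_NUM_STRIDES)
  #define MLX5_MPWRQ_STRIDE_SIZE      BIT(MLX5_MPWRQ_LOG_STRIDE_SIZE)
  #define MLX5_MPWRQ_LOG_WQE_SZ       (MLX5_MPWRQ_LOG_NUM_STRIDES + MLX5_MPWRQ_LOG_STRIDE_SIZE)
  #define MLX5_MPWRQ_WQE_PAGE_ORDER   (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
  #define MLX5_MPWRQ_PAGES_PER_WQE    BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
  #define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> MLX5_MPWRQ_WQE_PAGE_ORDER)

  int main(void)
  {
          printf("strides/WQE = %lu, stride = %lu B, WQE = %lu KB\n",
                 MLX5_MPWRQ_NUM_STRIDES, MLX5_MPWRQ_STRIDE_SIZE,
                 BIT(MLX5_MPWRQ_LOG_WQE_SZ) >> 10);       /* 2048, 64, 128 KB */
          printf("page order = %d, pages/WQE = %lu, strides/page = %lu\n",
                 MLX5_MPWRQ_WQE_PAGE_ORDER, MLX5_MPWRQ_PAGES_PER_WQE,
                 MLX5_MPWRQ_STRIDES_PER_PAGE);             /* 5, 32, 64 */
          return 0;
  }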
@@ -74,6 +92,38 @@
 #define MLX5E_NUM_MAIN_GROUPS 9
 #define MLX5E_NET_IP_ALIGN 2
 
+static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
+{
+        switch (wq_type) {
+        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
+                             wq_size / 2);
+        default:
+                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
+                             wq_size / 2);
+        }
+}
+
+static inline int mlx5_min_log_rq_size(int wq_type)
+{
+        switch (wq_type) {
+        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+        default:
+                return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+        }
+}
+
+static inline int mlx5_max_log_rq_size(int wq_type)
+{
+        switch (wq_type) {
+        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+                return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
+        default:
+                return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+        }
+}
+
 struct mlx5e_tx_wqe {
         struct mlx5_wqe_ctrl_seg ctrl;
         struct mlx5_wqe_eth_seg eth;
@@ -128,6 +178,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
         "tx_queue_wake",
         "tx_queue_dropped",
         "rx_wqe_err",
+        "rx_mpwqe_filler",
 };
 
 struct mlx5e_vport_stats {
@@ -169,8 +220,9 @@ struct mlx5e_vport_stats {
         u64 tx_queue_wake;
         u64 tx_queue_dropped;
         u64 rx_wqe_err;
+        u64 rx_mpwqe_filler;
 
-#define NUM_VPORT_COUNTERS 35
+#define NUM_VPORT_COUNTERS 36
 };
 
 static const char pport_strings[][ETH_GSTRING_LEN] = {
@@ -263,7 +315,8 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
         "csum_sw",
         "lro_packets",
         "lro_bytes",
-        "wqe_err"
+        "wqe_err",
+        "mpwqe_filler",
 };
 
 struct mlx5e_rq_stats {
@@ -274,7 +327,8 @@ struct mlx5e_rq_stats {
         u64 lro_packets;
         u64 lro_bytes;
         u64 wqe_err;
-#define NUM_RQ_STATS 7
+        u64 mpwqe_filler;
+#define NUM_RQ_STATS 8
 };
 
 static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
@@ -318,6 +372,7 @@ struct mlx5e_stats {
 
 struct mlx5e_params {
         u8 log_sq_size;
+        u8 rq_wq_type;
         u8 log_rq_size;
         u16 num_channels;
         u8 num_tc;
@@ -374,11 +429,23 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
 typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
                                   u16 ix);
 
+struct mlx5e_dma_info {
+        struct page *page;
+        dma_addr_t addr;
+};
+
+struct mlx5e_mpw_info {
+        struct mlx5e_dma_info dma_info;
+        u16 consumed_strides;
+        u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+};
+
 struct mlx5e_rq {
         /* data path */
         struct mlx5_wq_ll wq;
         u32 wqe_sz;
         struct sk_buff **skb;
+        struct mlx5e_mpw_info *wqe_info;
 
         struct device *pdev;
         struct net_device *netdev;
@@ -393,6 +460,7 @@ struct mlx5e_rq {
 
         /* control */
         struct mlx5_wq_ctrl wq_ctrl;
+        u8 wq_type;
         u32 rqn;
         struct mlx5e_channel *channel;
         struct mlx5e_priv *priv;
@@ -649,9 +717,12 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
 
 void mlx5e_update_stats(struct mlx5e_priv *priv);
@@ -273,8 +273,9 @@ static void mlx5e_get_ringparam(struct net_device *dev,
                                 struct ethtool_ringparam *param)
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
+        int rq_wq_type = priv->params.rq_wq_type;
 
-        param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+        param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
         param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
         param->rx_pending = 1 << priv->params.log_rq_size;
         param->tx_pending = 1 << priv->params.log_sq_size;
@@ -285,6 +286,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
         bool was_opened;
+        int rq_wq_type = priv->params.rq_wq_type;
         u16 min_rx_wqes;
         u8 log_rq_size;
         u8 log_sq_size;
@@ -300,16 +302,16 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                             __func__);
                 return -EINVAL;
         }
-        if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+        if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
                 netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
                             __func__, param->rx_pending,
-                            1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+                            1 << mlx5_min_log_rq_size(rq_wq_type));
                 return -EINVAL;
         }
-        if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+        if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
                 netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
                             __func__, param->rx_pending,
-                            1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+                            1 << mlx5_max_log_rq_size(rq_wq_type));
                 return -EINVAL;
         }
         if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
@@ -327,8 +329,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
 
         log_rq_size = order_base_2(param->rx_pending);
         log_sq_size = order_base_2(param->tx_pending);
-        min_rx_wqes = min_t(u16, param->rx_pending - 1,
-                            MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+        min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
 
         if (log_rq_size == priv->params.log_rq_size &&
             log_sq_size == priv->params.log_sq_size &&
@@ -175,6 +175,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
         s->rx_csum_none = 0;
         s->rx_csum_sw = 0;
         s->rx_wqe_err = 0;
+        s->rx_mpwqe_filler = 0;
         for (i = 0; i < priv->params.num_channels; i++) {
                 rq_stats = &priv->channel[i]->rq.stats;
 
@@ -185,6 +186,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
                 s->rx_csum_none += rq_stats->csum_none;
                 s->rx_csum_sw += rq_stats->csum_sw;
                 s->rx_wqe_err += rq_stats->wqe_err;
+                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
 
                 for (j = 0; j < priv->params.num_tc; j++) {
                         sq_stats = &priv->channel[i]->sq[j].stats;
@@ -323,6 +325,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
         struct mlx5_core_dev *mdev = priv->mdev;
         void *rqc = param->rqc;
         void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+        u32 byte_count;
         int wq_sz;
         int err;
         int i;
@@ -337,28 +340,47 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
         rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
 
         wq_sz = mlx5_wq_ll_get_size(&rq->wq);
-        rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
-                               cpu_to_node(c->cpu));
-        if (!rq->skb) {
-                err = -ENOMEM;
-                goto err_rq_wq_destroy;
-        }
 
-        rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
-                                             MLX5E_SW2HW_MTU(priv->netdev->mtu);
-        rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+        switch (priv->params.rq_wq_type) {
+        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+                rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
+                                            GFP_KERNEL, cpu_to_node(c->cpu));
+                if (!rq->wqe_info) {
+                        err = -ENOMEM;
+                        goto err_rq_wq_destroy;
+                }
+                rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
+                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+
+                rq->wqe_sz = MLX5_MPWRQ_NUM_STRIDES * MLX5_MPWRQ_STRIDE_SIZE;
+                byte_count = rq->wqe_sz;
+                break;
+        default: /* MLX5_WQ_TYPE_LINKED_LIST */
+                rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+                                       cpu_to_node(c->cpu));
+                if (!rq->skb) {
+                        err = -ENOMEM;
+                        goto err_rq_wq_destroy;
+                }
+                rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+                rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+
+                rq->wqe_sz = (priv->params.lro_en) ?
+                                priv->params.lro_wqe_sz :
+                                MLX5E_SW2HW_MTU(priv->netdev->mtu);
+                rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+                byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
+                byte_count |= MLX5_HW_START_PADDING;
+        }
 
         for (i = 0; i < wq_sz; i++) {
                 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
-                u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
 
                 wqe->data.lkey = c->mkey_be;
-                wqe->data.byte_count =
-                        cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+                wqe->data.byte_count = cpu_to_be32(byte_count);
         }
 
-        rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
-        rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+        rq->wq_type = priv->params.rq_wq_type;
         rq->pdev = c->pdev;
         rq->netdev = c->netdev;
         rq->tstamp = &priv->tstamp;
@@ -376,7 +398,14 @@ err_rq_wq_destroy:
 
 static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
 {
-        kfree(rq->skb);
+        switch (rq->wq_type) {
+        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+                kfree(rq->wqe_info);
+                break;
+        default: /* MLX5_WQ_TYPE_LINKED_LIST */
+                kfree(rq->skb);
+        }
+
         mlx5_wq_destroy(&rq->wq_ctrl);
 }
 
@@ -1065,7 +1094,18 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
         void *rqc = param->rqc;
         void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 
-        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+        switch (priv->params.rq_wq_type) {
+        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+                MLX5_SET(wq, wq, log_wqe_num_of_strides,
+                         MLX5_MPWRQ_LOG_NUM_STRIDES - 9);
+                MLX5_SET(wq, wq, log_wqe_stride_size,
+                         MLX5_MPWRQ_LOG_STRIDE_SIZE - 6);
+                MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+                break;
+        default: /* MLX5_WQ_TYPE_LINKED_LIST */
+                MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+        }
 
         MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
         MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
         MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
@@ -1111,8 +1151,18 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
                                     struct mlx5e_cq_param *param)
 {
         void *cqc = param->cqc;
+        u8 log_cq_size;
 
-        MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
+        switch (priv->params.rq_wq_type) {
+        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+                log_cq_size = priv->params.log_rq_size +
+                        MLX5_MPWRQ_LOG_NUM_STRIDES;
+                break;
+        default: /* MLX5_WQ_TYPE_LINKED_LIST */
+                log_cq_size = priv->params.log_rq_size;
+        }
+
+        MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
 
         mlx5e_build_common_cq_param(priv, param);
 }
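As an illustrative aside (not driver code), the CQ sizing above can be
checked numerically for the striding-RQ defaults: one CQE per stride.

  #include <stdio.h>

  int main(void)
  {
          /* Defaults from this patch, assuming the striding RQ is in use. */
          int log_rq_size = 0x4;            /* MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW */
          int log_num_strides = 11;         /* MLX5_MPWRQ_LOG_NUM_STRIDES */
          int log_cq_size = log_rq_size + log_num_strides;

          /* 2^15 = 32768 CQEs: room for one completion per stride. */
          printf("log_cq_size = %d -> %d CQEs\n", log_cq_size, 1 << log_cq_size);
          return 0;
  }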
@@ -1983,7 +2033,8 @@ static int mlx5e_set_features(struct net_device *netdev,
         if (changes & NETIF_F_LRO) {
                 bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
 
-                if (was_opened)
+                if (was_opened && (priv->params.rq_wq_type ==
+                                   MLX5_WQ_TYPE_LINKED_LIST))
                         mlx5e_close_locked(priv->netdev);
 
                 priv->params.lro_en = !!(features & NETIF_F_LRO);
@@ -1992,7 +2043,8 @@ static int mlx5e_set_features(struct net_device *netdev,
                         mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
                                        err);
 
-                if (was_opened)
+                if (was_opened && (priv->params.rq_wq_type ==
+                                   MLX5_WQ_TYPE_LINKED_LIST))
                         err = mlx5e_open_locked(priv->netdev);
         }
 
@@ -2327,8 +2379,21 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 
         priv->params.log_sq_size =
                 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
-        priv->params.log_rq_size =
-                MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+        priv->params.rq_wq_type = MLX5_CAP_GEN(mdev, striding_rq) ?
+                MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+                MLX5_WQ_TYPE_LINKED_LIST;
+
+        switch (priv->params.rq_wq_type) {
+        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+                priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+                priv->params.lro_en = true;
+                break;
+        default: /* MLX5_WQ_TYPE_LINKED_LIST */
+                priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+        }
+
+        priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+                                            BIT(priv->params.log_rq_size));
         priv->params.rx_cq_moderation_usec =
                 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
         priv->params.rx_cq_moderation_pkts =
@@ -2338,8 +2403,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
         priv->params.tx_cq_moderation_pkts =
                 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
         priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
-        priv->params.min_rx_wqes =
-                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
         priv->params.num_tc = 1;
         priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
 
@@ -76,6 +76,41 @@ err_free_skb:
         return -ENOMEM;
 }
 
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+        gfp_t gfp_mask;
+        int i;
+
+        gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
+        wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+                                             MLX5_MPWRQ_WQE_PAGE_ORDER);
+        if (unlikely(!wi->dma_info.page))
+                return -ENOMEM;
+
+        wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
+                                         rq->wqe_sz, PCI_DMA_FROMDEVICE);
+        if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
+                put_page(wi->dma_info.page);
+                return -ENOMEM;
+        }
+
+        /* We split the high-order page into order-0 ones and manage their
+         * reference counter to minimize the memory held by small skb fragments
+         */
+        split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
+        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+                atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE,
+                           &wi->dma_info.page[i]._count);
+                wi->skbs_frags[i] = 0;
+        }
+
+        wi->consumed_strides = 0;
+        wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
+
+        return 0;
+}
+
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
         struct mlx5_wq_ll *wq = &rq->wq;
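The reference counting in mlx5e_alloc_rx_mpwqe() (and its teardown in
mlx5e_handle_rx_cqe_mpwrq() further below) balances so that, once a WQE is
fully consumed, each order-0 page is left holding exactly one reference per
outstanding skb fragment. The toy model below uses plain integers instead
of the kernel's atomics and page structs, purely as an illustration.

  #include <stdio.h>

  #define STRIDES_PER_PAGE 64   /* MLX5_MPWRQ_STRIDES_PER_PAGE with the defaults */

  int main(void)
  {
          int refcount = 1;       /* each order-0 page after split_page() */
          int skbs_frags = 3;     /* example: 3 packet fragments landed in this page */

          refcount += STRIDES_PER_PAGE;   /* pre-charged at WQE allocation */
          /* RX path: every fragment attached to an skb uses one pre-charged
           * reference, counted in skbs_frags, without touching the refcount. */

          /* WQE fully consumed: return the unused pre-charged references,
           * then drop the driver's own reference (put_page()). */
          refcount -= STRIDES_PER_PAGE - skbs_frags;
          refcount -= 1;

          /* What remains is exactly one reference per in-flight skb fragment. */
          printf("remaining refs = %d (== skbs_frags = %d)\n", refcount, skbs_frags);
          return 0;
  }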
@@ -100,7 +135,8 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
         return !mlx5_wq_ll_is_full(wq);
 }
 
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+                                 u32 cqe_bcnt)
 {
         struct ethhdr *eth = (struct ethhdr *)(skb->data);
         struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
@@ -111,7 +147,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
         int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
                        (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
 
-        u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+        u16 tot_len = cqe_bcnt - ETH_HLEN;
 
         if (eth->h_proto == htons(ETH_P_IP)) {
                 tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
@@ -191,19 +227,17 @@ csum_none:
 }
 
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+                                      u32 cqe_bcnt,
                                       struct mlx5e_rq *rq,
                                       struct sk_buff *skb)
 {
         struct net_device *netdev = rq->netdev;
-        u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
         struct mlx5e_tstamp *tstamp = rq->tstamp;
         int lro_num_seg;
 
-        skb_put(skb, cqe_bcnt);
-
         lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
         if (lro_num_seg > 1) {
-                mlx5e_lro_update_hdr(skb, cqe);
+                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
                 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
                 rq->stats.lro_packets++;
                 rq->stats.lro_bytes += cqe_bcnt;
@@ -228,12 +262,24 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                 skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
 }
 
+static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+                                         struct mlx5_cqe64 *cqe,
+                                         u32 cqe_bcnt,
+                                         struct sk_buff *skb)
+{
+        rq->stats.packets++;
+        rq->stats.bytes += cqe_bcnt;
+        mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+        napi_gro_receive(rq->cq.napi, skb);
+}
+
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
         struct mlx5e_rx_wqe *wqe;
         struct sk_buff *skb;
         __be16 wqe_counter_be;
         u16 wqe_counter;
+        u32 cqe_bcnt;
 
         wqe_counter_be = cqe->wqe_counter;
         wqe_counter = be16_to_cpu(wqe_counter_be);
@@ -253,16 +299,103 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
                 goto wq_ll_pop;
         }
 
-        mlx5e_build_rx_skb(cqe, rq, skb);
-        rq->stats.packets++;
-        rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
-        napi_gro_receive(rq->cq.napi, skb);
+        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+        skb_put(skb, cqe_bcnt);
+
+        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 
 wq_ll_pop:
         mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
                        &wqe->next.next_wqe_index);
 }
 
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+        u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+        u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+        u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+        struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+        struct sk_buff *skb;
+        u32 consumed_bytes;
+        u32 head_offset;
+        u32 frag_offset;
+        u32 wqe_offset;
+        u32 page_idx;
+        u16 byte_cnt;
+        u16 cqe_bcnt;
+        u16 headlen;
+        int i;
+
+        wi->consumed_strides += cstrides;
+
+        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+                rq->stats.wqe_err++;
+                goto mpwrq_cqe_out;
+        }
+
+        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+                rq->stats.mpwqe_filler++;
+                goto mpwrq_cqe_out;
+        }
+
+        skb = netdev_alloc_skb(rq->netdev,
+                               ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
+                                     sizeof(long)));
+        if (unlikely(!skb))
+                goto mpwrq_cqe_out;
+
+        prefetch(skb->data);
+        wqe_offset = stride_ix * MLX5_MPWRQ_STRIDE_SIZE;
+        consumed_bytes = cstrides * MLX5_MPWRQ_STRIDE_SIZE;
+        dma_sync_single_for_cpu(rq->pdev, wi->dma_info.addr + wqe_offset,
+                                consumed_bytes, DMA_FROM_DEVICE);
+
+        head_offset = wqe_offset & (PAGE_SIZE - 1);
+        page_idx = wqe_offset >> PAGE_SHIFT;
+        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+        headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+        frag_offset = head_offset + headlen;
+
+        byte_cnt = cqe_bcnt - headlen;
+        while (byte_cnt) {
+                u32 pg_consumed_bytes =
+                        min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+                unsigned int truesize =
+                        ALIGN(pg_consumed_bytes, MLX5_MPWRQ_STRIDE_SIZE);
+
+                wi->skbs_frags[page_idx]++;
+                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                                &wi->dma_info.page[page_idx], frag_offset,
+                                pg_consumed_bytes, truesize);
+                byte_cnt -= pg_consumed_bytes;
+                frag_offset = 0;
+                page_idx++;
+        }
+
+        skb_copy_to_linear_data(skb,
+                                page_address(wi->dma_info.page) + wqe_offset,
+                                ALIGN(headlen, sizeof(long)));
+        /* skb linear part was allocated with headlen and aligned to long */
+        skb->tail += headlen;
+        skb->len += headlen;
+
+        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+mpwrq_cqe_out:
+        if (likely(wi->consumed_strides < MLX5_MPWRQ_NUM_STRIDES))
+                return;
+
+        dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
+                       PCI_DMA_FROMDEVICE);
+        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+                atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i],
+                           &wi->dma_info.page[i]._count);
+                put_page(&wi->dma_info.page[i]);
+        }
+        mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
+
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
         struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
@@ -644,7 +644,8 @@ struct mlx5_err_cqe {
 };
 
 struct mlx5_cqe64 {
-        u8 rsvd0[4];
+        u8 rsvd0[2];
+        __be16 wqe_id;
         u8 lro_tcppsh_abort_dupack;
         u8 lro_min_ttl;
         __be16 lro_tcp_win;
@@ -696,6 +697,42 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
         return (u64)lo | ((u64)hi << 32);
 }
 
+struct mpwrq_cqe_bc {
+        __be16 filler_consumed_strides;
+        __be16 byte_cnt;
+};
+
+static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
+{
+        struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+        return be16_to_cpu(bc->byte_cnt);
+}
+
+static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
+{
+        return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
+{
+        struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+        return mpwrq_get_cqe_bc_consumed_strides(bc);
+}
+
+static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
+{
+        struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+        return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
+{
+        return be16_to_cpu(cqe->wqe_counter);
+}
+
 enum {
         CQE_L4_HDR_TYPE_NONE = 0x0,
         CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
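To make the CQE byte_cnt overlay above concrete, here is a small user-space
sketch (illustration only, with made-up sample values already converted to
CPU endianness, as be16_to_cpu() would return them) that decodes the fields
the same way the helpers do: bit 15 is the filler flag, the low 15 bits are
the consumed-strides count.

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          /* Hypothetical CQE fields: filler bit clear, 3 strides, 190 bytes. */
          uint16_t filler_consumed_strides = 0x0003;
          uint16_t byte_cnt = 190;

          printf("filler           = %d\n", !!(filler_consumed_strides & 0x8000));
          printf("consumed strides = %u\n", filler_consumed_strides & 0x7fff);
          printf("byte count       = %u\n", byte_cnt);
          return 0;
  }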