Merge branch 'qed-bandwidth-config'

Manish Chopra says:

====================
qed: Bandwidth configuration support

This series adds driver support for min/max bandwidth configuration
of the PF, applied on link change notification or on an explicit
bandwidth-update request from the MFW [management firmware].

The same infrastructure will later be used by user-based flows
[for example, rate shaping for the VFs].

Please consider applying this series to "net-next"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2016-04-26 13:56:27 -04:00
Parent 101e63b454 a64b02d530
Commit 713d4ddc99
7 changed files with 491 additions and 62 deletions
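
For reference, the arithmetic the series applies is simple: min_bw/max_bw arrive from the MFW as percentages in the [1-100] range, the PF min rate becomes line_speed * min_bw / 100, and each vport's WFQ weight becomes min_speed * QED_WFQ_UNIT / min_pf_rate. The stand-alone sketch below only illustrates that arithmetic; it is not part of the patch and its helper names are invented for clarity.

/* Illustrative user-space sketch of the bandwidth math used by the series;
 * not driver code - the helper names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define QED_WFQ_UNIT 100	/* weights are expressed in percent, as in qed.h */

/* PF min rate in Mb/s, derived from link speed and the min_bw percentage */
static uint32_t pf_min_rate(uint32_t line_speed, uint8_t min_bw)
{
	return (line_speed * min_bw) / 100;
}

/* Per-vport WFQ weight, mirroring the formula in
 * qed_configure_wfq_for_all_vports()
 */
static uint32_t vport_wfq_weight(uint32_t vport_min_speed, uint32_t min_pf_rate)
{
	return (vport_min_speed * QED_WFQ_UNIT) / min_pf_rate;
}

int main(void)
{
	uint32_t line_speed = 25000;	/* 25G link, in Mb/s */
	uint8_t min_bw = 40;		/* MFW guarantees this PF 40% */
	uint32_t min_rate = pf_min_rate(line_speed, min_bw);	/* 10000 Mb/s */

	/* A vport asking for 2500 Mb/s gets weight 25 out of QED_WFQ_UNIT */
	printf("min_pf_rate=%u Mb/s, vport_wfq=%u\n",
	       (unsigned int)min_rate,
	       (unsigned int)vport_wfq_weight(2500, min_rate));
	return 0;
}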

View file

@@ -32,6 +32,8 @@ extern const struct qed_common_ops qed_common_ops_pass;
 #define NAME_SIZE 16
 #define VER_SIZE 16
 
+#define QED_WFQ_UNIT 100
+
 /* cau states */
 enum qed_coalescing_mode {
 	QED_COAL_MODE_DISABLE,
@@ -237,6 +239,12 @@ struct qed_dmae_info {
 	struct dmae_cmd *p_dmae_cmd;
 };
 
+struct qed_wfq_data {
+	/* when feature is configured for at least 1 vport */
+	u32 min_speed;
+	bool configured;
+};
+
 struct qed_qm_info {
 	struct init_qm_pq_params *qm_pq_params;
 	struct init_qm_vport_params *qm_vport_params;
@@ -257,6 +265,7 @@ struct qed_qm_info {
 	bool vport_wfq_en;
 	u8 pf_wfq;
 	u32 pf_rl;
+	struct qed_wfq_data *wfq_data;
 };
 
 struct storm_stats {
@@ -526,6 +535,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 
 #define PURE_LB_TC 8
 
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+
 #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
 
 /* Other Linux specific common definitions */

View file

@@ -105,6 +105,8 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
 	qm_info->qm_vport_params = NULL;
 	kfree(qm_info->qm_port_params);
 	qm_info->qm_port_params = NULL;
+	kfree(qm_info->wfq_data);
+	qm_info->wfq_data = NULL;
 }
 
 void qed_resc_free(struct qed_dev *cdev)
@@ -175,6 +177,11 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 	if (!qm_info->qm_port_params)
 		goto alloc_err;
 
+	qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
+				    GFP_KERNEL);
+	if (!qm_info->wfq_data)
+		goto alloc_err;
+
 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
 
 	/* First init per-TC PQs */
@@ -213,18 +220,19 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
 
+	for (i = 0; i < qm_info->num_vports; i++)
+		qm_info->qm_vport_params[i].vport_wfq = 1;
+
 	qm_info->pf_wfq = 0;
 	qm_info->pf_rl = 0;
 	qm_info->vport_rl_en = 1;
+	qm_info->vport_wfq_en = 1;
 
 	return 0;
 
 alloc_err:
 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
-	kfree(qm_info->qm_pq_params);
-	kfree(qm_info->qm_vport_params);
-	kfree(qm_info->qm_port_params);
+	qed_qm_info_free(p_hwfn);
 	return -ENOMEM;
 }
@@ -575,7 +583,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 		p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
 
 		/* Update rate limit once we'll actually have a link */
-		p_hwfn->qm_info.pf_rl = 100;
+		p_hwfn->qm_info.pf_rl = 100000;
 	}
 
 	qed_cxt_hw_init_pf(p_hwfn);
@@ -1595,3 +1603,312 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
 
 	return 0;
 }
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have
+ * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+					     struct qed_ptt *p_ptt,
+					     u32 min_pf_rate)
+{
+	struct init_qm_vport_params *vport_params;
+	int i;
+
+	vport_params = p_hwfn->qm_info.qm_vport_params;
+
+	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+					    min_pf_rate;
+		qed_init_vport_wfq(p_hwfn, p_ptt,
+				   vport_params[i].first_tx_pq_id,
+				   vport_params[i].vport_wfq);
+	}
+}
+
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+				       u32 min_pf_rate)
+{
+	int i;
+
+	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+					   struct qed_ptt *p_ptt,
+					   u32 min_pf_rate)
+{
+	struct init_qm_vport_params *vport_params;
+	int i;
+
+	vport_params = p_hwfn->qm_info.qm_vport_params;
+
+	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+		qed_init_vport_wfq(p_hwfn, p_ptt,
+				   vport_params[i].first_tx_pq_id,
+				   vport_params[i].vport_wfq);
+	}
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
+ *    rates to get less than one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */
+static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+			      u16 vport_id, u32 req_rate,
+			      u32 min_pf_rate)
+{
+	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+	int non_requested_count = 0, req_count = 0, i, num_vports;
+
+	num_vports = p_hwfn->qm_info.num_vports;
+
+	/* Accounting for the vports which are configured for WFQ explicitly */
+	for (i = 0; i < num_vports; i++) {
+		u32 tmp_speed;
+
+		if ((i != vport_id) &&
+		    p_hwfn->qm_info.wfq_data[i].configured) {
+			req_count++;
+			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+			total_req_min_rate += tmp_speed;
+		}
+	}
+
+	/* Include current vport data as well */
+	req_count++;
+	total_req_min_rate += req_rate;
+	non_requested_count = num_vports - req_count;
+
+	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+			   vport_id, req_rate, min_pf_rate);
+		return -EINVAL;
+	}
+
+	if (num_vports > QED_WFQ_UNIT) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Number of vports is greater than %d\n",
+			   QED_WFQ_UNIT);
+		return -EINVAL;
+	}
+
+	if (total_req_min_rate > min_pf_rate) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+			   total_req_min_rate, min_pf_rate);
+		return -EINVAL;
+	}
+
+	total_left_rate = min_pf_rate - total_req_min_rate;
+
+	left_rate_per_vp = total_left_rate / non_requested_count;
+	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+			   left_rate_per_vp, min_pf_rate);
+		return -EINVAL;
+	}
+
+	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+	for (i = 0; i < num_vports; i++) {
+		if (p_hwfn->qm_info.wfq_data[i].configured)
+			continue;
+
+		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+	}
+
+	return 0;
+}
+
+static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
+						 struct qed_ptt *p_ptt,
+						 u32 min_pf_rate)
+{
+	bool use_wfq = false;
+	int rc = 0;
+	u16 i;
+
+	/* Validate all pre configured vports for wfq */
+	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+		u32 rate;
+
+		if (!p_hwfn->qm_info.wfq_data[i].configured)
+			continue;
+
+		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+		use_wfq = true;
+
+		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "WFQ validation failed while configuring min rate\n");
+			break;
+		}
+	}
+
+	if (!rc && use_wfq)
+		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+	else
+		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+	return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+{
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		__qed_configure_vp_wfq_on_link_change(p_hwfn,
+						      p_hwfn->p_dpc_ptt,
+						      min_pf_rate);
+	}
+}
+
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_mcp_link_state *p_link,
+				     u8 max_bw)
+{
+	int rc = 0;
+
+	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+	if (!p_link->line_speed && (max_bw != 100))
+		return rc;
+
+	p_link->speed = (p_link->line_speed * max_bw) / 100;
+	p_hwfn->qm_info.pf_rl = p_link->speed;
+
+	/* Since the limiter also affects Tx-switched traffic, we don't want it
+	 * to limit such traffic in case there's no actual limit.
+	 * In that case, set limit to imaginary high boundary.
+	 */
+	if (max_bw == 100)
+		p_hwfn->qm_info.pf_rl = 100000;
+
+	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+			    p_hwfn->qm_info.pf_rl);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+		   "Configured MAX bandwidth to be %08x Mb/sec\n",
+		   p_link->speed);
+
+	return rc;
+}
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
+{
+	int i, rc = -EINVAL;
+
+	if (max_bw < 1 || max_bw > 100) {
+		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
+		return rc;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+		struct qed_mcp_link_state *p_link;
+		struct qed_ptt *p_ptt;
+
+		p_link = &p_lead->mcp_info->link_output;
+
+		p_ptt = qed_ptt_acquire(p_hwfn);
+		if (!p_ptt)
+			return -EBUSY;
+
+		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+						      p_link, max_bw);
+
+		qed_ptt_release(p_hwfn, p_ptt);
+
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_mcp_link_state *p_link,
+				     u8 min_bw)
+{
+	int rc = 0;
+
+	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+	p_hwfn->qm_info.pf_wfq = min_bw;
+
+	if (!p_link->line_speed)
+		return rc;
+
+	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+		   "Configured MIN bandwidth to be %d Mb/sec\n",
+		   p_link->min_pf_rate);
+
+	return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
+{
+	int i, rc = -EINVAL;
+
+	if (min_bw < 1 || min_bw > 100) {
+		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
+		return rc;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+		struct qed_mcp_link_state *p_link;
+		struct qed_ptt *p_ptt;
+
+		p_link = &p_lead->mcp_info->link_output;
+
+		p_ptt = qed_ptt_acquire(p_hwfn);
+		if (!p_ptt)
+			return -EBUSY;
+
+		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+						      p_link, min_bw);
+		if (rc) {
+			qed_ptt_release(p_hwfn, p_ptt);
+			return rc;
+		}
+
+		if (p_link->min_pf_rate) {
+			u32 min_rate = p_link->min_pf_rate;
+
+			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
+								   p_ptt,
+								   min_rate);
+		}
+
+		qed_ptt_release(p_hwfn, p_ptt);
+	}
+
+	return rc;
+}
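
The per-vport min-rate validation in qed_init_wfq_param() above boils down to a budget check: each requested rate must be at least one percent of min_pf_rate, the sum of requested rates (including the current request) must fit within min_pf_rate, and the leftover bandwidth split among the non-requesting vports must also stay above that one-percent floor. The condensed restatement below is illustrative only; the function name and flattened argument list are not from the driver.

/* Condensed, hypothetical restatement of the checks in qed_init_wfq_param().
 * All rates are in Mb/s; total_req_min_rate already includes req_rate, and
 * QED_WFQ_UNIT (100) makes the floor one percent of the PF min rate.
 */
#include <stdbool.h>
#include <stdint.h>

#define QED_WFQ_UNIT 100

static bool wfq_request_is_valid(uint32_t req_rate, uint32_t total_req_min_rate,
				 int num_vports, int req_count,
				 uint32_t min_pf_rate)
{
	uint32_t pct_floor = min_pf_rate / QED_WFQ_UNIT;
	int non_requested = num_vports - req_count;

	if (req_rate < pct_floor)		/* rule 1: at least 1% of PF min rate */
		return false;
	if (num_vports > QED_WFQ_UNIT)		/* weights are percentages, so at most 100 vports */
		return false;
	if (total_req_min_rate > min_pf_rate)	/* rule 3: requests must fit in the PF min rate */
		return false;
	if (non_requested > 0 &&		/* rule 2: leftover share stays above 1% */
	    (min_pf_rate - total_req_min_rate) / non_requested < pct_floor)
		return false;
	return true;
}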

View file

@@ -3837,7 +3837,7 @@ struct public_drv_mb {
 #define DRV_MSG_CODE_SET_LLDP 0x24000000
 #define DRV_MSG_CODE_SET_DCBX 0x25000000
-
+#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000
 
 #define DRV_MSG_CODE_INITIATE_FLR 0x02000000
@@ -5116,4 +5116,8 @@ struct hw_set_image {
 	struct hw_set_info hw_sets[1];
 };
 
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		    u8 pf_id, u16 pf_wfq);
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
 #endif

View file

@@ -712,6 +712,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
+		    u8 pf_id, u16 pf_wfq)
+{
+	u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+
+	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+		return -1;
+	}
+
+	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
+	return 0;
+}
+
 int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
 		   struct qed_ptt *p_ptt,
 		   u8 pf_id,
@@ -732,6 +747,31 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       u16 first_tx_pq_id[NUM_OF_TCS],
+		       u16 vport_wfq)
+{
+	u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+	u8 tc;
+
+	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+		DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+		return -1;
+	}
+
+	for (tc = 0; tc < NUM_OF_TCS; tc++) {
+		u16 vport_pq_id = first_tx_pq_id[tc];
+
+		if (vport_pq_id != QM_INVALID_PQ_ID)
+			qed_wr(p_hwfn, p_ptt,
+			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
+			       inc_val);
+	}
+
+	return 0;
+}
+
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
 		      struct qed_ptt *p_ptt,
 		      u8 vport_id,

View file

@@ -472,6 +472,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
 				       bool b_reset)
 {
 	struct qed_mcp_link_state *p_link;
+	u8 max_bw, min_bw;
 	u32 status = 0;
 
 	p_link = &p_hwfn->mcp_info->link_output;
@@ -527,17 +528,20 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
 		p_link->speed = 0;
 	}
 
-	/* Correct speed according to bandwidth allocation */
-	if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
-		p_link->speed = p_link->speed *
-				p_hwfn->mcp_info->func_info.bandwidth_max /
-				100;
-		qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
-			       p_link->speed);
-		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
-			   "Configured MAX bandwidth to be %08x Mb/sec\n",
-			   p_link->speed);
-	}
+	if (p_link->link_up && p_link->speed)
+		p_link->line_speed = p_link->speed;
+	else
+		p_link->line_speed = 0;
+
+	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+	/* Max bandwidth configuration */
+	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
+
+	/* Min bandwidth configuration */
+	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
+	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
 
 	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
 	p_link->an_complete = !!(status &
@@ -648,6 +652,77 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+				  struct public_func *p_shmem_info)
+{
+	struct qed_mcp_function_info *p_info;
+
+	p_info = &p_hwfn->mcp_info->func_info;
+
+	p_info->bandwidth_min = (p_shmem_info->config &
+				 FUNC_MF_CFG_MIN_BW_MASK) >>
+				FUNC_MF_CFG_MIN_BW_SHIFT;
+	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+		DP_INFO(p_hwfn,
+			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
+			p_info->bandwidth_min);
+		p_info->bandwidth_min = 1;
+	}
+
+	p_info->bandwidth_max = (p_shmem_info->config &
+				 FUNC_MF_CFG_MAX_BW_MASK) >>
+				FUNC_MF_CFG_MAX_BW_SHIFT;
+	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+		DP_INFO(p_hwfn,
+			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
+			p_info->bandwidth_max);
+		p_info->bandwidth_max = 100;
+	}
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt,
+				  struct public_func *p_data,
+				  int pfid)
+{
+	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					PUBLIC_FUNC);
+	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+	u32 i, size;
+
+	memset(p_data, 0, sizeof(*p_data));
+
+	size = min_t(u32, sizeof(*p_data),
+		     QED_SECTION_SIZE(mfw_path_offsize));
+	for (i = 0; i < size / sizeof(u32); i++)
+		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+					    func_addr + (i << 2));
+
+	return size;
+}
+
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt)
+{
+	struct qed_mcp_function_info *p_info;
+	struct public_func shmem_info;
+	u32 resp = 0, param = 0;
+
+	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+			       MCP_PF_ID(p_hwfn));
+
+	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+	p_info = &p_hwfn->mcp_info->func_info;
+
+	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
+	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
+
+	/* Acknowledge the MFW */
+	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+		    &param);
+}
+
 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt)
 {
@@ -679,6 +754,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
 			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
 			break;
+		case MFW_DRV_MSG_BW_UPDATE:
+			qed_mcp_update_bw(p_hwfn, p_ptt);
+			break;
 		default:
 			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
 			rc = -EINVAL;
@@ -758,28 +836,6 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
 	return 0;
 }
 
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
-				  struct qed_ptt *p_ptt,
-				  struct public_func *p_data,
-				  int pfid)
-{
-	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
-					PUBLIC_FUNC);
-	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
-	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
-	u32 i, size;
-
-	memset(p_data, 0, sizeof(*p_data));
-
-	size = min_t(u32, sizeof(*p_data),
-		     QED_SECTION_SIZE(mfw_path_offsize));
-	for (i = 0; i < size / sizeof(u32); i++)
-		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
-					    func_addr + (i << 2));
-
-	return size;
-}
-
 static int
 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
 			struct public_func *p_info,
@@ -818,26 +874,7 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
 		return -EINVAL;
 	}
 
-	info->bandwidth_min = (shmem_info.config &
-			       FUNC_MF_CFG_MIN_BW_MASK) >>
-			      FUNC_MF_CFG_MIN_BW_SHIFT;
-	if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
-		DP_INFO(p_hwfn,
-			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
-			info->bandwidth_min);
-		info->bandwidth_min = 1;
-	}
-
-	info->bandwidth_max = (shmem_info.config &
-			       FUNC_MF_CFG_MAX_BW_MASK) >>
-			      FUNC_MF_CFG_MAX_BW_SHIFT;
-	if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
-		DP_INFO(p_hwfn,
-			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
-			info->bandwidth_max);
-		info->bandwidth_max = 100;
-	}
+	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
 
 	if (shmem_info.mac_upper || shmem_info.mac_lower) {
 		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
@@ -938,9 +975,10 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
 	p_drv_version = &union_data.drv_version;
 	p_drv_version->version = p_ver->version;
 	for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
 		val = cpu_to_be32(p_ver->name[i]);
-		*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+		*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
 	}
 
 	memset(&mb_params, 0, sizeof(mb_params));

View file

@@ -40,7 +40,15 @@ struct qed_mcp_link_capabilities {
 struct qed_mcp_link_state {
 	bool link_up;
 
-	u32 speed; /* In Mb/s */
+	u32 min_pf_rate;
+
+	/* Actual link speed in Mb/s */
+	u32 line_speed;
+
+	/* PF max speed in Mb/s, deduced from line_speed
+	 * according to PF max bandwidth configuration.
+	 */
+	u32 speed;
 	bool full_duplex;
 
 	bool an;
@@ -388,5 +396,14 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
  * @return true iff MFW is running and mcp_info is initialized
  */
 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_mcp_link_state *p_link,
+				     u8 max_bw);
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_mcp_link_state *p_link,
+				     u8 min_bw);
 #endif

View file

@@ -458,4 +458,6 @@
 #define PBF_REG_NGE_COMP_VER 0xd80524UL
 #define PRS_REG_NGE_COMP_VER 0x1f0878UL
 
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
 #endif