Merge branch 'qed-firmware-TLV'

Sudarsana Reddy Kalluru says:

====================
qed*: Add support for management firmware TLV request.

Management firmware (MFW) requires config and state information from
the driver. It queries this via a TLV (type-length-value) request in
which the MFW specifies the list of TLVs it needs. The driver fills in
the TLV data and sends the response back to the MFW.
This patch series adds the qed/qede/qedf/qedi driver implementation for
supporting these TLV queries from the MFW.
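
To make the exchange concrete, the sketch below (plain userspace C) walks a
request buffer of the kind the MFW hands to the driver: each entry starts with
a small header giving the TLV type, its value length in dwords, and a flags
byte the driver sets once it has filled the value. The header layout mirrors
struct qed_drv_tlv_hdr added by this series; tlv_fill_value(), the example
type value and the buffer contents are hypothetical illustrations, not qed
code.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct drv_tlv_hdr {                 /* mirrors struct qed_drv_tlv_hdr */
	uint8_t tlv_type;            /* which value the MFW is asking for */
	uint8_t tlv_length;          /* value size in dwords, header excluded */
	uint8_t tlv_reserved;
	uint8_t tlv_flags;           /* bit 0: driver filled in the value */
};
#define DRV_TLV_FLAGS_CHANGED 0x01

/* Hypothetical per-type lookup; returns 0 and writes the value if known. */
static int tlv_fill_value(uint8_t type, void *val, size_t len)
{
	uint64_t rx_frames = 12345;  /* illustrative counter only */

	if (type == 0x01 && len >= sizeof(rx_frames)) {
		memcpy(val, &rx_frames, sizeof(rx_frames));
		return 0;
	}
	return -1;                   /* type not handled by this sketch */
}

/* Walk the request, fill what we know, and mark those TLVs as changed. */
static void fill_tlv_request(uint8_t *buf, size_t size)
{
	size_t off = 0;

	while (off + sizeof(struct drv_tlv_hdr) <= size) {
		struct drv_tlv_hdr *hdr = (struct drv_tlv_hdr *)(buf + off);
		size_t val_len = (size_t)hdr->tlv_length * sizeof(uint32_t);

		if (off + sizeof(*hdr) + val_len > size)
			break;       /* malformed request, stop walking */

		if (!tlv_fill_value(hdr->tlv_type, hdr + 1, val_len))
			hdr->tlv_flags |= DRV_TLV_FLAGS_CHANGED;

		off += sizeof(*hdr) + val_len;
	}
	/* The real driver then acks the request over the MCP mailbox. */
}

int main(void)
{
	/* One TLV: type 0x01, value length 2 dwords, flags clear. */
	uint8_t buf[32] = { 0x01, 0x02, 0x00, 0x00 };

	fill_tlv_request(buf, sizeof(buf));
	return buf[3] == DRV_TLV_FLAGS_CHANGED ? 0 : 1;
}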

Changes from previous versions:
-------------------------------
v2: Split patch (2) into multiple simpler patches.
v2: Update the qed_tlv_parsed_buf->p_val datatype to a void pointer to avoid
    a bunch of unnecessary typecasts.

Please consider applying this series to "net-next".
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-05-22 23:29:55 -04:00
Parents: 9c803cfd5f 269afb3603
Commit: 1fe8c06c4a
16 changed files with 2502 additions and 2 deletions

@ -3,7 +3,7 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o

@ -92,6 +92,8 @@ struct qed_eth_cb_ops;
struct qed_dev_info;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
enum qed_mfw_tlv_type;
union qed_mfw_tlv_data;
/* helpers */
#define QED_MFW_GET_FIELD(name, field) \
@ -513,6 +515,10 @@ struct qed_simd_fp_handler {
void (*func)(void *);
};
enum qed_slowpath_wq_flag {
QED_SLOWPATH_MFW_TLV_REQ,
};
struct qed_hwfn {
struct qed_dev *cdev;
u8 my_id; /* ID inside the PF */
@ -642,6 +648,9 @@ struct qed_hwfn {
#endif
struct z_stream_s *stream;
struct workqueue_struct *slowpath_wq;
struct delayed_work slowpath_task;
unsigned long slowpath_task_flags;
};
struct pci_params {
@ -906,5 +915,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
enum qed_mfw_tlv_type type,
union qed_mfw_tlv_data *tlv_data);
#endif /* _QED_H */

@ -11863,6 +11863,8 @@ struct public_global {
u32 running_bundle_id;
s32 external_temperature;
u32 mdump_reason;
u32 data_ptr;
u32 data_size;
};
struct fw_flr_mb {
@ -12322,6 +12324,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
@ -12523,6 +12526,7 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11,
MFW_DRV_MSG_OEM_CFG_UPDATE,
MFW_DRV_MSG_GET_TLV_REQ,
MFW_DRV_MSG_MAX
};
@ -12558,6 +12562,233 @@ struct mcp_public_data {
struct public_func func[MCP_GLOB_FUNC_MAX];
};
/* OCBB definitions */
enum tlvs {
/* Category 1: Device Properties */
DRV_TLV_CLP_STR,
DRV_TLV_CLP_STR_CTD,
/* Category 6: Device Configuration */
DRV_TLV_SCSI_TO,
DRV_TLV_R_T_TOV,
DRV_TLV_R_A_TOV,
DRV_TLV_E_D_TOV,
DRV_TLV_CR_TOV,
DRV_TLV_BOOT_TYPE,
/* Category 8: Port Configuration */
DRV_TLV_NPIV_ENABLED,
/* Category 10: Function Configuration */
DRV_TLV_FEATURE_FLAGS,
DRV_TLV_LOCAL_ADMIN_ADDR,
DRV_TLV_ADDITIONAL_MAC_ADDR_1,
DRV_TLV_ADDITIONAL_MAC_ADDR_2,
DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
DRV_TLV_LSO_MIN_SEGMENT_COUNT,
DRV_TLV_PROMISCUOUS_MODE,
DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
DRV_TLV_OS_DRIVER_STATES,
DRV_TLV_PXE_BOOT_PROGRESS,
/* Category 12: FC/FCoE Configuration */
DRV_TLV_NPIV_STATE,
DRV_TLV_NUM_OF_NPIV_IDS,
DRV_TLV_SWITCH_NAME,
DRV_TLV_SWITCH_PORT_NUM,
DRV_TLV_SWITCH_PORT_ID,
DRV_TLV_VENDOR_NAME,
DRV_TLV_SWITCH_MODEL,
DRV_TLV_SWITCH_FW_VER,
DRV_TLV_QOS_PRIORITY_PER_802_1P,
DRV_TLV_PORT_ALIAS,
DRV_TLV_PORT_STATE,
DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
DRV_TLV_LINK_FAILURE_COUNT,
DRV_TLV_FCOE_BOOT_PROGRESS,
/* Category 13: iSCSI Configuration */
DRV_TLV_TARGET_LLMNR_ENABLED,
DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
DRV_TLV_AUTHENTICATION_METHOD,
DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
DRV_TLV_MAX_FRAME_SIZE,
DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
DRV_TLV_ISCSI_BOOT_PROGRESS,
/* Category 20: Device Data */
DRV_TLV_PCIE_BUS_RX_UTILIZATION,
DRV_TLV_PCIE_BUS_TX_UTILIZATION,
DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
DRV_TLV_NCSI_RX_BYTES_RECEIVED,
DRV_TLV_NCSI_TX_BYTES_SENT,
/* Category 22: Base Port Data */
DRV_TLV_RX_DISCARDS,
DRV_TLV_RX_ERRORS,
DRV_TLV_TX_ERRORS,
DRV_TLV_TX_DISCARDS,
DRV_TLV_RX_FRAMES_RECEIVED,
DRV_TLV_TX_FRAMES_SENT,
/* Category 23: FC/FCoE Port Data */
DRV_TLV_RX_BROADCAST_PACKETS,
DRV_TLV_TX_BROADCAST_PACKETS,
/* Category 28: Base Function Data */
DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
DRV_TLV_PF_RX_FRAMES_RECEIVED,
DRV_TLV_RX_BYTES_RECEIVED,
DRV_TLV_PF_TX_FRAMES_SENT,
DRV_TLV_TX_BYTES_SENT,
DRV_TLV_IOV_OFFLOAD,
DRV_TLV_PCI_ERRORS_CAP_ID,
DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
DRV_TLV_UNCORRECTABLE_ERROR_MASK,
DRV_TLV_CORRECTABLE_ERROR_STATUS,
DRV_TLV_CORRECTABLE_ERROR_MASK,
DRV_TLV_PCI_ERRORS_AECC_REGISTER,
DRV_TLV_TX_QUEUES_EMPTY,
DRV_TLV_RX_QUEUES_EMPTY,
DRV_TLV_TX_QUEUES_FULL,
DRV_TLV_RX_QUEUES_FULL,
/* Category 29: FC/FCoE Function Data */
DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
DRV_TLV_FCOE_RX_BYTES_RECEIVED,
DRV_TLV_FCOE_TX_FRAMES_SENT,
DRV_TLV_FCOE_TX_BYTES_SENT,
DRV_TLV_CRC_ERROR_COUNT,
DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
DRV_TLV_CRC_ERROR_1_TIMESTAMP,
DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
DRV_TLV_CRC_ERROR_2_TIMESTAMP,
DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
DRV_TLV_CRC_ERROR_3_TIMESTAMP,
DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
DRV_TLV_CRC_ERROR_4_TIMESTAMP,
DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
DRV_TLV_CRC_ERROR_5_TIMESTAMP,
DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
DRV_TLV_DISPARITY_ERROR_COUNT,
DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
DRV_TLV_LAST_FLOGI_TIMESTAMP,
DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
DRV_TLV_LAST_FLOGI_RJT,
DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
DRV_TLV_FDISCS_SENT_COUNT,
DRV_TLV_FDISC_ACCS_RECEIVED,
DRV_TLV_FDISC_RJTS_RECEIVED,
DRV_TLV_PLOGI_SENT_COUNT,
DRV_TLV_PLOGI_ACCS_RECEIVED,
DRV_TLV_PLOGI_RJTS_RECEIVED,
DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
DRV_TLV_PLOGI_1_TIMESTAMP,
DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
DRV_TLV_PLOGI_2_TIMESTAMP,
DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
DRV_TLV_PLOGI_3_TIMESTAMP,
DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
DRV_TLV_PLOGI_4_TIMESTAMP,
DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
DRV_TLV_PLOGI_5_TIMESTAMP,
DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
DRV_TLV_LOGOS_ISSUED,
DRV_TLV_LOGO_ACCS_RECEIVED,
DRV_TLV_LOGO_RJTS_RECEIVED,
DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
DRV_TLV_LOGO_1_TIMESTAMP,
DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
DRV_TLV_LOGO_2_TIMESTAMP,
DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
DRV_TLV_LOGO_3_TIMESTAMP,
DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
DRV_TLV_LOGO_4_TIMESTAMP,
DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
DRV_TLV_LOGO_5_TIMESTAMP,
DRV_TLV_LOGOS_RECEIVED,
DRV_TLV_ACCS_ISSUED,
DRV_TLV_PRLIS_ISSUED,
DRV_TLV_ACCS_RECEIVED,
DRV_TLV_ABTS_SENT_COUNT,
DRV_TLV_ABTS_ACCS_RECEIVED,
DRV_TLV_ABTS_RJTS_RECEIVED,
DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
DRV_TLV_ABTS_1_TIMESTAMP,
DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
DRV_TLV_ABTS_2_TIMESTAMP,
DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
DRV_TLV_ABTS_3_TIMESTAMP,
DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
DRV_TLV_ABTS_4_TIMESTAMP,
DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
DRV_TLV_ABTS_5_TIMESTAMP,
DRV_TLV_RSCNS_RECEIVED,
DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
DRV_TLV_LUN_RESETS_ISSUED,
DRV_TLV_ABORT_TASK_SETS_ISSUED,
DRV_TLV_TPRLOS_SENT,
DRV_TLV_NOS_SENT_COUNT,
DRV_TLV_NOS_RECEIVED_COUNT,
DRV_TLV_OLS_COUNT,
DRV_TLV_LR_COUNT,
DRV_TLV_LRR_COUNT,
DRV_TLV_LIP_SENT_COUNT,
DRV_TLV_LIP_RECEIVED_COUNT,
DRV_TLV_EOFA_COUNT,
DRV_TLV_EOFNI_COUNT,
DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
DRV_TLV_SCSI_STATUS_BUSY_COUNT,
DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
/* Category 30: iSCSI Function Data */
DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
DRV_TLV_ISCSI_PDU_TX_BYTES_SENT
};
struct nvm_cfg_mac_address {
u32 mac_addr_hi;
#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF

@ -946,6 +946,68 @@ static void qed_update_pf_params(struct qed_dev *cdev,
}
}
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
int i;
if (IS_VF(cdev))
return;
for_each_hwfn(cdev, i) {
if (!cdev->hwfns[i].slowpath_wq)
continue;
flush_workqueue(cdev->hwfns[i].slowpath_wq);
destroy_workqueue(cdev->hwfns[i].slowpath_wq);
}
}
static void qed_slowpath_task(struct work_struct *work)
{
struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
slowpath_task.work);
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
if (!ptt) {
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
return;
}
if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
&hwfn->slowpath_task_flags))
qed_mfw_process_tlv_req(hwfn, ptt);
qed_ptt_release(hwfn, ptt);
}
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn;
char name[NAME_SIZE];
int i;
if (IS_VF(cdev))
return 0;
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
if (!hwfn->slowpath_wq) {
DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
return -ENOMEM;
}
INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
}
return 0;
}
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
@ -961,6 +1023,9 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (qed_iov_wq_start(cdev))
goto err;
if (qed_slowpath_wq_start(cdev))
goto err;
if (IS_PF(cdev)) {
rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
&cdev->pdev->dev);
@ -1095,6 +1160,8 @@ err:
qed_iov_wq_stop(cdev, false);
qed_slowpath_wq_stop(cdev);
return rc;
}
@ -1103,6 +1170,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
qed_slowpath_wq_stop(cdev);
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
@ -2088,3 +2157,89 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
return;
}
}
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
"Scheduling slowpath task [Flag: %d]\n",
QED_SLOWPATH_MFW_TLV_REQ);
smp_mb__before_atomic();
set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
smp_mb__after_atomic();
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
return 0;
}
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
struct qed_common_cb_ops *op = cdev->protocol_ops.common;
struct qed_eth_stats_common *p_common;
struct qed_generic_tlvs gen_tlvs;
struct qed_eth_stats stats;
int i;
memset(&gen_tlvs, 0, sizeof(gen_tlvs));
op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
tlv->flags.ipv4_csum_offload = true;
if (gen_tlvs.feat_flags & QED_TLV_LSO)
tlv->flags.lso_supported = true;
tlv->flags.b_set = true;
for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
if (is_valid_ether_addr(gen_tlvs.mac[i])) {
ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
tlv->mac_set[i] = true;
}
}
qed_get_vport_stats(cdev, &stats);
p_common = &stats.common;
tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
p_common->rx_bcast_pkts;
tlv->rx_frames_set = true;
tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
p_common->rx_bcast_bytes;
tlv->rx_bytes_set = true;
tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
p_common->tx_bcast_pkts;
tlv->tx_frames_set = true;
tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
p_common->tx_bcast_bytes;
tlv->tx_bytes_set = true;
}
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
union qed_mfw_tlv_data *tlv_buf)
{
struct qed_dev *cdev = hwfn->cdev;
struct qed_common_cb_ops *ops;
ops = cdev->protocol_ops.common;
if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
DP_NOTICE(hwfn, "Can't collect TLV management info\n");
return -EINVAL;
}
switch (type) {
case QED_MFW_TLV_GENERIC:
qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
break;
case QED_MFW_TLV_ETH:
ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
break;
case QED_MFW_TLV_FCOE:
ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
break;
case QED_MFW_TLV_ISCSI:
ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
break;
default:
break;
}
return 0;
}

@ -1622,6 +1622,8 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_S_TAG_UPDATE:
qed_mcp_update_stag(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_GET_TLV_REQ:
qed_mfw_tlv_req(p_hwfn);
break;
default:
DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);

@ -213,6 +213,44 @@ enum qed_ov_wol {
QED_OV_WOL_ENABLED
};
enum qed_mfw_tlv_type {
QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
QED_MFW_TLV_ISCSI = 0x8, /* SCSI protocol TLVs */
QED_MFW_TLV_MAX = 0x16,
};
struct qed_mfw_tlv_generic {
#define QED_MFW_TLV_FLAGS_SIZE 2
struct {
u8 ipv4_csum_offload;
u8 lso_supported;
bool b_set;
} flags;
#define QED_MFW_TLV_MAC_COUNT 3
/* First entry for primary MAC, 2 secondary MACs possible */
u8 mac[QED_MFW_TLV_MAC_COUNT][6];
bool mac_set[QED_MFW_TLV_MAC_COUNT];
u64 rx_frames;
bool rx_frames_set;
u64 rx_bytes;
bool rx_bytes_set;
u64 tx_frames;
bool tx_frames_set;
u64 tx_bytes;
bool tx_bytes_set;
};
union qed_mfw_tlv_data {
struct qed_mfw_tlv_generic generic;
struct qed_mfw_tlv_eth eth;
struct qed_mfw_tlv_fcoe fcoe;
struct qed_mfw_tlv_iscsi iscsi;
};
/**
* @brief - returns the link params of the hw function
*
@ -561,6 +599,17 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
struct bist_nvm_image_att *p_image_att,
u32 image_index);
/**
* @brief - Processes the TLV request from MFW i.e., get the required TLV info
* from the qed client and send it to the MFW.
*
* @param p_hwfn
* @param p_ptt
*
* @return 0 upon success.
*/
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@ -621,6 +670,14 @@ struct qed_mcp_mb_params {
u32 mcp_param;
};
struct qed_drv_tlv_hdr {
u8 tlv_type;
u8 tlv_length; /* In dwords - not including this header */
u8 tlv_reserved;
#define QED_DRV_TLV_FLAGS_CHANGED 0x01
u8 tlv_flags;
};
/**
* @brief Initialize the interface with the MCP
*

(The diff for one file is not shown here because of its large size.)
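
For orientation, since that large diff is collapsed (it is presumably the new
qed_mng_tlv.c added to the Makefile earlier), here is a hedged outline of the
request-processing flow that file is expected to implement, based only on the
interfaces visible elsewhere in this series: the data_ptr/data_size fields
added to struct public_global, qed_mfw_fill_tlv_data(), and the
DRV_MSG_CODE_GET_TLV_DONE mailbox command. The names below (shmem_read_req,
shmem_write_resp, fill_tlv_group, ack_tlv_done, TLV_REQ_MAX) are stand-ins for
this sketch, not qed functions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TLV_REQ_MAX 256                  /* arbitrary size for the sketch */

/* Stand-in for the shmem region described by public_global data_ptr/data_size */
struct tlv_shmem {
	uint32_t data_size;
	uint8_t  data[TLV_REQ_MAX];
};

static void shmem_read_req(struct tlv_shmem *s, uint8_t *dst)
{
	memcpy(dst, s->data, s->data_size);
}

static void shmem_write_resp(struct tlv_shmem *s, const uint8_t *src)
{
	memcpy(s->data, src, s->data_size);
}

static void fill_tlv_group(const char *group, uint8_t *buf, uint32_t size)
{
	/* In qed this is roughly where qed_mfw_fill_tlv_data() collects the
	 * values from the core/protocol driver and they get copied into the
	 * per-TLV slots of the buffer. */
	printf("fill %s TLVs over %u bytes\n", group, size);
	(void)buf;
}

static void ack_tlv_done(void)
{
	/* In qed this is the DRV_MSG_CODE_GET_TLV_DONE mailbox command. */
	printf("ack: GET_TLV_DONE\n");
}

/* Expected overall shape of qed_mfw_process_tlv_req() (assumption). */
static void process_tlv_req(struct tlv_shmem *s)
{
	uint8_t buf[TLV_REQ_MAX];

	shmem_read_req(s, buf);                       /* 1. copy request out of shmem */
	fill_tlv_group("generic", buf, s->data_size); /* 2. fill values per TLV group */
	shmem_write_resp(s, buf);                     /* 3. write filled buffer back  */
	ack_tlv_done();                               /* 4. tell MFW the data is ready */
}

int main(void)
{
	struct tlv_shmem s = { .data_size = 64 };

	process_tlv_req(&s);
	return 0;
}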

@ -133,6 +133,9 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
struct qed_generic_tlvs *data);
/* The qede lock is used to protect driver state change and driver flows that
* are not reentrant.
@ -228,6 +231,8 @@ static struct qed_eth_cb_ops qede_ll_ops = {
.arfs_filter_op = qede_arfs_filter_op,
#endif
.link_update = qede_link_update,
.get_generic_tlv_data = qede_get_generic_tlv_data,
.get_protocol_tlv_data = qede_get_eth_tlv_data,
},
.force_mac = qede_force_mac,
.ports_update = qede_udp_ports_update,
@ -2131,3 +2136,99 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
}
}
}
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
struct netdev_queue *netdev_txq;
netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
if (netif_xmit_stopped(netdev_txq))
return true;
return false;
}
static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
struct qede_dev *edev = dev;
struct netdev_hw_addr *ha;
int i;
if (edev->ndev->features & NETIF_F_IP_CSUM)
data->feat_flags |= QED_TLV_IP_CSUM;
if (edev->ndev->features & NETIF_F_TSO)
data->feat_flags |= QED_TLV_LSO;
ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
memset(data->mac[1], 0, ETH_ALEN);
memset(data->mac[2], 0, ETH_ALEN);
/* Copy the first two UC macs */
netif_addr_lock_bh(edev->ndev);
i = 1;
netdev_for_each_uc_addr(ha, edev->ndev) {
ether_addr_copy(data->mac[i++], ha->addr);
if (i == QED_TLV_MAC_COUNT)
break;
}
netif_addr_unlock_bh(edev->ndev);
}
static void qede_get_eth_tlv_data(void *dev, void *data)
{
struct qed_mfw_tlv_eth *etlv = data;
struct qede_dev *edev = dev;
struct qede_fastpath *fp;
int i;
etlv->lso_maxoff_size = 0XFFFF;
etlv->lso_maxoff_size_set = true;
etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
etlv->lso_minseg_size_set = true;
etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
etlv->prom_mode_set = true;
etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
etlv->tx_descr_size_set = true;
etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
etlv->rx_descr_size_set = true;
etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
etlv->iov_offload_set = true;
/* Fill information regarding queues; Should be done under the qede
* lock to guarantee those don't change beneath our feet.
*/
etlv->txqs_empty = true;
etlv->rxqs_empty = true;
etlv->num_txqs_full = 0;
etlv->num_rxqs_full = 0;
__qede_lock(edev);
for_each_queue(i) {
fp = &edev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
etlv->txqs_empty = false;
if (qede_is_txq_full(edev, fp->txq))
etlv->num_txqs_full++;
}
if (fp->type & QEDE_FASTPATH_RX) {
if (qede_has_rx_work(fp->rxq))
etlv->rxqs_empty = false;
/* This one is a bit tricky; Firmware might stop
* placing packets if ring is not yet full.
* Give an approximation.
*/
if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
RX_RING_SIZE - 100)
etlv->num_rxqs_full++;
}
}
__qede_unlock(edev);
etlv->txqs_empty_set = true;
etlv->rxqs_empty_set = true;
etlv->num_txqs_full_set = true;
etlv->num_rxqs_full_set = true;
}

@ -383,11 +383,16 @@ struct qedf_ctx {
u32 flogi_failed;
/* Used for fc statistics */
struct mutex stats_mutex;
u64 input_requests;
u64 output_requests;
u64 control_requests;
u64 packet_aborts;
u64 alloc_failures;
u8 lun_resets;
u8 target_resets;
u8 task_set_fulls;
u8 busy;
};
struct io_bdt {
@ -496,7 +501,9 @@ extern int qedf_post_io_req(struct qedf_rport *fcport,
extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_send_flogi(struct qedf_ctx *qedf);
extern void qedf_get_protocol_tlv_data(void *dev, void *data);
extern void qedf_fp_io_handler(struct work_struct *work);
extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF

@ -439,7 +439,6 @@ qedf_dbg_offload_stats_open(struct inode *inode, struct file *file)
return single_open(file, qedf_offload_stats_show, qedf);
}
const struct file_operations qedf_dbg_fops[] = {
qedf_dbg_fileops(qedf, fp_int),
qedf_dbg_fileops_seq(qedf, io_trace),

@ -1200,6 +1200,12 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
fcport->retry_delay_timestamp =
jiffies + (qualifier * HZ / 10);
}
/* Record stats */
if (io_req->cdb_status ==
SAM_STAT_TASK_SET_FULL)
qedf->task_set_fulls++;
else
qedf->busy++;
}
}
if (io_req->fcp_resid)
@ -1866,6 +1872,11 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
goto reset_tmf_err;
}
if (tm_flags == FCP_TMF_LUN_RESET)
qedf->lun_resets++;
else if (tm_flags == FCP_TMF_TGT_RESET)
qedf->target_resets++;
/* Initialize rest of io_req fields */
io_req->sc_cmd = sc_cmd;
io_req->fcport = fcport;

@ -566,6 +566,8 @@ static struct qed_fcoe_cb_ops qedf_cb_ops = {
{
.link_update = qedf_link_update,
.dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
}
};
@ -1746,6 +1748,8 @@ static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
goto out;
}
mutex_lock(&qedf->stats_mutex);
/* Query firmware for offload stats */
qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
@ -1779,6 +1783,7 @@ static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
mutex_unlock(&qedf->stats_mutex);
kfree(fw_fcoe_stats);
out:
return qedf_stats;
@ -2948,6 +2953,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->stop_io_on_error = false;
pci_set_drvdata(pdev, qedf);
init_completion(&qedf->fipvlan_compl);
mutex_init(&qedf->stats_mutex);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
@ -3392,6 +3398,104 @@ static void qedf_remove(struct pci_dev *pdev)
__qedf_remove(pdev, QEDF_MODE_NORMAL);
}
/*
* Protocol TLV handler
*/
void qedf_get_protocol_tlv_data(void *dev, void *data)
{
struct qedf_ctx *qedf = dev;
struct qed_mfw_tlv_fcoe *fcoe = data;
struct fc_lport *lport = qedf->lport;
struct Scsi_Host *host = lport->host;
struct fc_host_attrs *fc_host = shost_to_fc_host(host);
struct fc_host_statistics *hst;
/* Force a refresh of the fc_host stats including offload stats */
hst = qedf_fc_get_host_stats(host);
fcoe->qos_pri_set = true;
fcoe->qos_pri = 3; /* Hard coded to 3 in driver */
fcoe->ra_tov_set = true;
fcoe->ra_tov = lport->r_a_tov;
fcoe->ed_tov_set = true;
fcoe->ed_tov = lport->e_d_tov;
fcoe->npiv_state_set = true;
fcoe->npiv_state = 1; /* NPIV always enabled */
fcoe->num_npiv_ids_set = true;
fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;
/* Certain attributes we only want to set if we've selected an FCF */
if (qedf->ctlr.sel_fcf) {
fcoe->switch_name_set = true;
u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
}
fcoe->port_state_set = true;
/* For qedf we're either link down or fabric attach */
if (lport->link_up)
fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
else
fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;
fcoe->link_failures_set = true;
fcoe->link_failures = (u16)hst->link_failure_count;
fcoe->fcoe_txq_depth_set = true;
fcoe->fcoe_rxq_depth_set = true;
fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;
fcoe->fcoe_rx_frames_set = true;
fcoe->fcoe_rx_frames = hst->rx_frames;
fcoe->fcoe_tx_frames_set = true;
fcoe->fcoe_tx_frames = hst->tx_frames;
fcoe->fcoe_rx_bytes_set = true;
fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;
fcoe->fcoe_tx_bytes_set = true;
fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;
fcoe->crc_count_set = true;
fcoe->crc_count = hst->invalid_crc_count;
fcoe->tx_abts_set = true;
fcoe->tx_abts = hst->fcp_packet_aborts;
fcoe->tx_lun_rst_set = true;
fcoe->tx_lun_rst = qedf->lun_resets;
fcoe->abort_task_sets_set = true;
fcoe->abort_task_sets = qedf->packet_aborts;
fcoe->scsi_busy_set = true;
fcoe->scsi_busy = qedf->busy;
fcoe->scsi_tsk_full_set = true;
fcoe->scsi_tsk_full = qedf->task_set_fulls;
}
/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
struct qedf_ctx *qedf;
if (!dev) {
QEDF_INFO(NULL, QEDF_LOG_EVT,
"dev is NULL so ignoring get_generic_tlv_data request.\n");
return;
}
qedf = (struct qedf_ctx *)dev;
memset(data, 0, sizeof(struct qed_generic_tlvs));
ether_addr_copy(data->mac[0], qedf->mac);
}
/*
* Module Init/Remove
*/

@ -353,6 +353,9 @@ struct qedi_ctx {
#define IPV6_LEN 41
#define IPV4_LEN 17
struct iscsi_boot_kset *boot_kset;
/* Used for iscsi statistics */
struct mutex stats_lock;
};
struct qedi_work {

@ -223,6 +223,12 @@ struct qedi_work_map {
struct work_struct *ptr_tmf_work;
};
struct qedi_boot_target {
char ip_addr[64];
char iscsi_name[255];
u32 ipv6_en;
};
#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16)))
#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)

@ -55,6 +55,7 @@ static void qedi_free_global_queues(struct qedi_ctx *qedi);
static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
@ -879,6 +880,201 @@ static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
kfree(qedi->global_queues);
}
static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
struct qedi_boot_target *tgt, u8 index)
{
u32 ipv6_en;
ipv6_en = !!(block->generic.ctrl_flags &
NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
block->target[index].target_name.byte);
tgt->ipv6_en = ipv6_en;
if (ipv6_en)
snprintf(tgt->ip_addr, IPV6_LEN, "%pI6\n",
block->target[index].ipv6_addr.byte);
else
snprintf(tgt->ip_addr, IPV4_LEN, "%pI4\n",
block->target[index].ipv4_addr.byte);
}
static int qedi_find_boot_info(struct qedi_ctx *qedi,
struct qed_mfw_tlv_iscsi *iscsi,
struct nvm_iscsi_block *block)
{
struct qedi_boot_target *pri_tgt = NULL, *sec_tgt = NULL;
u32 pri_ctrl_flags = 0, sec_ctrl_flags = 0, found = 0;
struct iscsi_cls_session *cls_sess;
struct iscsi_cls_conn *cls_conn;
struct qedi_conn *qedi_conn;
struct iscsi_session *sess;
struct iscsi_conn *conn;
char ep_ip_addr[64];
int i, ret = 0;
pri_ctrl_flags = !!(block->target[0].ctrl_flags &
NVM_ISCSI_CFG_TARGET_ENABLED);
if (pri_ctrl_flags) {
pri_tgt = kzalloc(sizeof(*pri_tgt), GFP_KERNEL);
if (!pri_tgt)
return -1;
qedi_get_boot_tgt_info(block, pri_tgt, 0);
}
sec_ctrl_flags = !!(block->target[1].ctrl_flags &
NVM_ISCSI_CFG_TARGET_ENABLED);
if (sec_ctrl_flags) {
sec_tgt = kzalloc(sizeof(*sec_tgt), GFP_KERNEL);
if (!sec_tgt) {
ret = -1;
goto free_tgt;
}
qedi_get_boot_tgt_info(block, sec_tgt, 1);
}
for (i = 0; i < qedi->max_active_conns; i++) {
qedi_conn = qedi_get_conn_from_id(qedi, i);
if (!qedi_conn)
continue;
if (qedi_conn->ep->ip_type == TCP_IPV4)
snprintf(ep_ip_addr, IPV4_LEN, "%pI4\n",
qedi_conn->ep->dst_addr);
else
snprintf(ep_ip_addr, IPV6_LEN, "%pI6\n",
qedi_conn->ep->dst_addr);
cls_conn = qedi_conn->cls_conn;
conn = cls_conn->dd_data;
cls_sess = iscsi_conn_to_session(cls_conn);
sess = cls_sess->dd_data;
if (pri_ctrl_flags) {
if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
!strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
found = 1;
break;
}
}
if (sec_ctrl_flags) {
if (!strcmp(sec_tgt->iscsi_name, sess->targetname) &&
!strcmp(sec_tgt->ip_addr, ep_ip_addr)) {
found = 1;
break;
}
}
}
if (found) {
if (conn->hdrdgst_en) {
iscsi->header_digest_set = true;
iscsi->header_digest = 1;
}
if (conn->datadgst_en) {
iscsi->data_digest_set = true;
iscsi->data_digest = 1;
}
iscsi->boot_taget_portal_set = true;
iscsi->boot_taget_portal = sess->tpgt;
} else {
ret = -1;
}
if (sec_ctrl_flags)
kfree(sec_tgt);
free_tgt:
if (pri_ctrl_flags)
kfree(pri_tgt);
return ret;
}
static void qedi_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
struct qedi_ctx *qedi;
if (!dev) {
QEDI_INFO(NULL, QEDI_LOG_EVT,
"dev is NULL so ignoring get_generic_tlv_data request.\n");
return;
}
qedi = (struct qedi_ctx *)dev;
memset(data, 0, sizeof(struct qed_generic_tlvs));
ether_addr_copy(data->mac[0], qedi->mac);
}
/*
* Protocol TLV handler
*/
static void qedi_get_protocol_tlv_data(void *dev, void *data)
{
struct qed_mfw_tlv_iscsi *iscsi = data;
struct qed_iscsi_stats *fw_iscsi_stats;
struct nvm_iscsi_block *block = NULL;
u32 chap_en = 0, mchap_en = 0;
struct qedi_ctx *qedi = dev;
int rval = 0;
fw_iscsi_stats = kmalloc(sizeof(*fw_iscsi_stats), GFP_KERNEL);
if (!fw_iscsi_stats) {
QEDI_ERR(&qedi->dbg_ctx,
"Could not allocate memory for fw_iscsi_stats.\n");
goto exit_get_data;
}
mutex_lock(&qedi->stats_lock);
/* Query firmware for offload stats */
qedi_ops->get_stats(qedi->cdev, fw_iscsi_stats);
mutex_unlock(&qedi->stats_lock);
iscsi->rx_frames_set = true;
iscsi->rx_frames = fw_iscsi_stats->iscsi_rx_packet_cnt;
iscsi->rx_bytes_set = true;
iscsi->rx_bytes = fw_iscsi_stats->iscsi_rx_bytes_cnt;
iscsi->tx_frames_set = true;
iscsi->tx_frames = fw_iscsi_stats->iscsi_tx_packet_cnt;
iscsi->tx_bytes_set = true;
iscsi->tx_bytes = fw_iscsi_stats->iscsi_tx_bytes_cnt;
iscsi->frame_size_set = true;
iscsi->frame_size = qedi->ll2_mtu;
block = qedi_get_nvram_block(qedi);
if (block) {
chap_en = !!(block->generic.ctrl_flags &
NVM_ISCSI_CFG_GEN_CHAP_ENABLED);
mchap_en = !!(block->generic.ctrl_flags &
NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED);
iscsi->auth_method_set = (chap_en || mchap_en) ? true : false;
iscsi->auth_method = 1;
if (chap_en)
iscsi->auth_method = 2;
if (mchap_en)
iscsi->auth_method = 3;
iscsi->tx_desc_size_set = true;
iscsi->tx_desc_size = QEDI_SQ_SIZE;
iscsi->rx_desc_size_set = true;
iscsi->rx_desc_size = QEDI_CQ_SIZE;
/* tpgt, hdr digest, data digest */
rval = qedi_find_boot_info(qedi, iscsi, block);
if (rval)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Boot target not set");
}
kfree(fw_iscsi_stats);
exit_get_data:
return;
}
static void qedi_link_update(void *dev, struct qed_link_output *link)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
@ -896,6 +1092,8 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
static struct qed_iscsi_cb_ops qedi_cb_ops = {
{
.link_update = qedi_link_update,
.get_protocol_tlv_data = qedi_get_protocol_tlv_data,
.get_generic_tlv_data = qedi_get_generic_tlv_data,
}
};

@ -182,6 +182,272 @@ enum qed_led_mode {
QED_LED_MODE_RESTORE
};
struct qed_mfw_tlv_eth {
u16 lso_maxoff_size;
bool lso_maxoff_size_set;
u16 lso_minseg_size;
bool lso_minseg_size_set;
u8 prom_mode;
bool prom_mode_set;
u16 tx_descr_size;
bool tx_descr_size_set;
u16 rx_descr_size;
bool rx_descr_size_set;
u16 netq_count;
bool netq_count_set;
u32 tcp4_offloads;
bool tcp4_offloads_set;
u32 tcp6_offloads;
bool tcp6_offloads_set;
u16 tx_descr_qdepth;
bool tx_descr_qdepth_set;
u16 rx_descr_qdepth;
bool rx_descr_qdepth_set;
u8 iov_offload;
#define QED_MFW_TLV_IOV_OFFLOAD_NONE (0)
#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1)
#define QED_MFW_TLV_IOV_OFFLOAD_VEB (2)
#define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3)
bool iov_offload_set;
u8 txqs_empty;
bool txqs_empty_set;
u8 rxqs_empty;
bool rxqs_empty_set;
u8 num_txqs_full;
bool num_txqs_full_set;
u8 num_rxqs_full;
bool num_rxqs_full_set;
};
#define QED_MFW_TLV_TIME_SIZE 14
struct qed_mfw_tlv_time {
bool b_set;
u8 month;
u8 day;
u8 hour;
u8 min;
u16 msec;
u16 usec;
};
struct qed_mfw_tlv_fcoe {
u8 scsi_timeout;
bool scsi_timeout_set;
u32 rt_tov;
bool rt_tov_set;
u32 ra_tov;
bool ra_tov_set;
u32 ed_tov;
bool ed_tov_set;
u32 cr_tov;
bool cr_tov_set;
u8 boot_type;
bool boot_type_set;
u8 npiv_state;
bool npiv_state_set;
u32 num_npiv_ids;
bool num_npiv_ids_set;
u8 switch_name[8];
bool switch_name_set;
u16 switch_portnum;
bool switch_portnum_set;
u8 switch_portid[3];
bool switch_portid_set;
u8 vendor_name[8];
bool vendor_name_set;
u8 switch_model[8];
bool switch_model_set;
u8 switch_fw_version[8];
bool switch_fw_version_set;
u8 qos_pri;
bool qos_pri_set;
u8 port_alias[3];
bool port_alias_set;
u8 port_state;
#define QED_MFW_TLV_PORT_STATE_OFFLINE (0)
#define QED_MFW_TLV_PORT_STATE_LOOP (1)
#define QED_MFW_TLV_PORT_STATE_P2P (2)
#define QED_MFW_TLV_PORT_STATE_FABRIC (3)
bool port_state_set;
u16 fip_tx_descr_size;
bool fip_tx_descr_size_set;
u16 fip_rx_descr_size;
bool fip_rx_descr_size_set;
u16 link_failures;
bool link_failures_set;
u8 fcoe_boot_progress;
bool fcoe_boot_progress_set;
u64 rx_bcast;
bool rx_bcast_set;
u64 tx_bcast;
bool tx_bcast_set;
u16 fcoe_txq_depth;
bool fcoe_txq_depth_set;
u16 fcoe_rxq_depth;
bool fcoe_rxq_depth_set;
u64 fcoe_rx_frames;
bool fcoe_rx_frames_set;
u64 fcoe_rx_bytes;
bool fcoe_rx_bytes_set;
u64 fcoe_tx_frames;
bool fcoe_tx_frames_set;
u64 fcoe_tx_bytes;
bool fcoe_tx_bytes_set;
u16 crc_count;
bool crc_count_set;
u32 crc_err_src_fcid[5];
bool crc_err_src_fcid_set[5];
struct qed_mfw_tlv_time crc_err[5];
u16 losync_err;
bool losync_err_set;
u16 losig_err;
bool losig_err_set;
u16 primtive_err;
bool primtive_err_set;
u16 disparity_err;
bool disparity_err_set;
u16 code_violation_err;
bool code_violation_err_set;
u32 flogi_param[4];
bool flogi_param_set[4];
struct qed_mfw_tlv_time flogi_tstamp;
u32 flogi_acc_param[4];
bool flogi_acc_param_set[4];
struct qed_mfw_tlv_time flogi_acc_tstamp;
u32 flogi_rjt;
bool flogi_rjt_set;
struct qed_mfw_tlv_time flogi_rjt_tstamp;
u32 fdiscs;
bool fdiscs_set;
u8 fdisc_acc;
bool fdisc_acc_set;
u8 fdisc_rjt;
bool fdisc_rjt_set;
u8 plogi;
bool plogi_set;
u8 plogi_acc;
bool plogi_acc_set;
u8 plogi_rjt;
bool plogi_rjt_set;
u32 plogi_dst_fcid[5];
bool plogi_dst_fcid_set[5];
struct qed_mfw_tlv_time plogi_tstamp[5];
u32 plogi_acc_src_fcid[5];
bool plogi_acc_src_fcid_set[5];
struct qed_mfw_tlv_time plogi_acc_tstamp[5];
u8 tx_plogos;
bool tx_plogos_set;
u8 plogo_acc;
bool plogo_acc_set;
u8 plogo_rjt;
bool plogo_rjt_set;
u32 plogo_src_fcid[5];
bool plogo_src_fcid_set[5];
struct qed_mfw_tlv_time plogo_tstamp[5];
u8 rx_logos;
bool rx_logos_set;
u8 tx_accs;
bool tx_accs_set;
u8 tx_prlis;
bool tx_prlis_set;
u8 rx_accs;
bool rx_accs_set;
u8 tx_abts;
bool tx_abts_set;
u8 rx_abts_acc;
bool rx_abts_acc_set;
u8 rx_abts_rjt;
bool rx_abts_rjt_set;
u32 abts_dst_fcid[5];
bool abts_dst_fcid_set[5];
struct qed_mfw_tlv_time abts_tstamp[5];
u8 rx_rscn;
bool rx_rscn_set;
u32 rx_rscn_nport[4];
bool rx_rscn_nport_set[4];
u8 tx_lun_rst;
bool tx_lun_rst_set;
u8 abort_task_sets;
bool abort_task_sets_set;
u8 tx_tprlos;
bool tx_tprlos_set;
u8 tx_nos;
bool tx_nos_set;
u8 rx_nos;
bool rx_nos_set;
u8 ols;
bool ols_set;
u8 lr;
bool lr_set;
u8 lrr;
bool lrr_set;
u8 tx_lip;
bool tx_lip_set;
u8 rx_lip;
bool rx_lip_set;
u8 eofa;
bool eofa_set;
u8 eofni;
bool eofni_set;
u8 scsi_chks;
bool scsi_chks_set;
u8 scsi_cond_met;
bool scsi_cond_met_set;
u8 scsi_busy;
bool scsi_busy_set;
u8 scsi_inter;
bool scsi_inter_set;
u8 scsi_inter_cond_met;
bool scsi_inter_cond_met_set;
u8 scsi_rsv_conflicts;
bool scsi_rsv_conflicts_set;
u8 scsi_tsk_full;
bool scsi_tsk_full_set;
u8 scsi_aca_active;
bool scsi_aca_active_set;
u8 scsi_tsk_abort;
bool scsi_tsk_abort_set;
u32 scsi_rx_chk[5];
bool scsi_rx_chk_set[5];
struct qed_mfw_tlv_time scsi_chk_tstamp[5];
};
struct qed_mfw_tlv_iscsi {
u8 target_llmnr;
bool target_llmnr_set;
u8 header_digest;
bool header_digest_set;
u8 data_digest;
bool data_digest_set;
u8 auth_method;
#define QED_MFW_TLV_AUTH_METHOD_NONE (1)
#define QED_MFW_TLV_AUTH_METHOD_CHAP (2)
#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3)
bool auth_method_set;
u16 boot_taget_portal;
bool boot_taget_portal_set;
u16 frame_size;
bool frame_size_set;
u16 tx_desc_size;
bool tx_desc_size_set;
u16 rx_desc_size;
bool rx_desc_size_set;
u8 boot_progress;
bool boot_progress_set;
u16 tx_desc_qdepth;
bool tx_desc_qdepth_set;
u16 rx_desc_qdepth;
bool rx_desc_qdepth_set;
u64 rx_frames;
bool rx_frames_set;
u64 rx_bytes;
bool rx_bytes_set;
u64 tx_frames;
bool tx_frames_set;
u64 tx_bytes;
bool tx_bytes_set;
};
#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
(void __iomem *)(reg_addr))
@ -485,6 +751,14 @@ struct qed_int_info {
u8 used_cnt;
};
struct qed_generic_tlvs {
#define QED_TLV_IP_CSUM BIT(0)
#define QED_TLV_LSO BIT(1)
u16 feat_flags;
#define QED_TLV_MAC_COUNT 3
u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
};
#define QED_NVM_SIGNATURE 0x12435687
enum qed_nvm_flash_cmd {
@ -499,6 +773,8 @@ struct qed_common_cb_ops {
void (*link_update)(void *dev,
struct qed_link_output *link);
void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
void (*get_protocol_tlv_data)(void *dev, void *data);
};
struct qed_selftest_ops {