Merge branch 'liquidio-next'

Raghu Vatsavayi says:

====================
liquidio: updates and bug fixes

Please consider the following patch series of liquidio bug fixes and
updates on top of net-next. The patches should be applied in the order
given, as some of them depend on earlier patches in the series.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

Commit: 36195d869e
@@ -367,7 +367,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
 void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
 {
-    u32 mask, i, loop = HZ;
+    int i;
+    u32 mask, loop = HZ;
     u32 d32;
 
     /* Reset the Enable bits for Input Queues. */
@@ -376,7 +377,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
     octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
 
     /* Wait until hardware indicates that the queues are out of reset. */
-    mask = oct->io_qmask.iq;
+    mask = (u32)oct->io_qmask.iq;
     d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
     while (((d32 & mask) != mask) && loop--) {
         d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
@@ -384,8 +385,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
     }
 
     /* Reset the doorbell register for each Input queue. */
-    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
-        if (!(oct->io_qmask.iq & (1UL << i)))
+    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+        if (!(oct->io_qmask.iq & (1ULL << i)))
             continue;
         octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
         d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
@@ -398,7 +399,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
 
     /* Wait until hardware indicates that the queues are out of reset. */
     loop = HZ;
-    mask = oct->io_qmask.oq;
+    mask = (u32)oct->io_qmask.oq;
     d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
     while (((d32 & mask) != mask) && loop--) {
         d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
@@ -408,8 +409,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
 
     /* Reset the doorbell register for each Output queue. */
     /* for (i = 0; i < oct->num_oqs; i++) { */
-    for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
-        if (!(oct->io_qmask.oq & (1UL << i)))
+    for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+        if (!(oct->io_qmask.oq & (1ULL << i)))
             continue;
         octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
         d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
@@ -429,16 +430,16 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
 
 void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
 {
-    u32 i;
+    int i;
 
-    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
-        if (!(oct->io_qmask.iq & (1UL << i)))
+    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+        if (!(oct->io_qmask.iq & (1ULL << i)))
             continue;
         oct->fn_list.setup_iq_regs(oct, i);
     }
 
-    for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
-        if (!(oct->io_qmask.oq & (1UL << i)))
+    for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+        if (!(oct->io_qmask.oq & (1ULL << i)))
             continue;
         oct->fn_list.setup_oq_regs(oct, i);
     }
 
@@ -450,8 +451,8 @@ void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
     oct->fn_list.enable_io_queues(oct);
 
     /* for (i = 0; i < oct->num_oqs; i++) { */
-    for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
-        if (!(oct->io_qmask.oq & (1UL << i)))
+    for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+        if (!(oct->io_qmask.oq & (1ULL << i)))
             continue;
         writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
     }
 
@@ -495,8 +496,7 @@ u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
 }
 
 u32
-lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
-                 struct octeon_instr_queue *iq)
+lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
 {
     u32 new_idx = readl(iq->inst_cnt_reg);
 
@@ -557,7 +557,8 @@ lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
 int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
 {
     struct octeon_droq *droq;
-    u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb;
+    int oq_no;
+    u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
     u32 droq_cnt_enb, droq_cnt_mask;
 
     droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
@@ -573,8 +574,8 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
     oct->droq_intr = 0;
 
     /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
-    for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
-        if (!(droq_mask & (1 << oq_no)))
+    for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
+        if (!(droq_mask & (1ULL << oq_no)))
             continue;
 
         droq = oct->droq[oq_no];
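
A note on the pattern running through the queue loops above: the per-device I/O
queue masks become 64-bit in this series (see the octeon_io_enable hunk further
down), so every bit test moves from `1UL << i` to `1ULL << i`, and the
queue-count macros gain an `oct` argument. The shift change matters because
`unsigned long` is only 32 bits on 32-bit kernels; once up to 64 queues can be
tracked, `1UL << i` would be undefined for i >= 32. A minimal standalone sketch
(illustrative only, not driver code):

    #include <stdint.h>

    /* 1ULL is guaranteed to be at least 64 bits wide, so indexes 0..63
     * are safe; 1UL << i is undefined for i >= 32 on ILP32 targets.
     */
    static int queue_enabled(uint64_t qmask, unsigned int q_no)
    {
        return (qmask & (1ULL << q_no)) != 0;
    }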
@@ -91,8 +91,7 @@ void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
 void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
 u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
 u32
-lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
-                 struct octeon_instr_queue *iq);
+lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
 void lio_cn6xxx_enable_interrupt(void *chip);
 void lio_cn6xxx_disable_interrupt(void *chip);
 void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
(One file's diff is not shown because of its large size.)
@@ -72,6 +72,9 @@ MODULE_PARM_DESC(console_bitmask,
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
+    (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
+
 static int debug = -1;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
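
The INCR_INSTRQUEUE_PKT_COUNT macro introduced here is a plain field increment
on the per-queue stats block; later hunks use it to count tx_restart events
whenever a stopped queue is woken. A self-contained analog (illustrative only):

    #include <stdio.h>

    struct iq_stats { unsigned long long tx_restart; };
    struct iq { struct iq_stats stats; };

    /* Same shape as the driver macro: bump one named stats field. */
    #define INCR_IQ_PKT_COUNT(iq_ptr, field, count) \
        ((iq_ptr)->stats.field += (count))

    int main(void)
    {
        struct iq q = { { 0 } };

        INCR_IQ_PKT_COUNT(&q, tx_restart, 1);
        printf("tx_restart=%llu\n", q.stats.tx_restart);
        return 0;
    }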
@@ -224,8 +227,8 @@ static void octeon_droq_bh(unsigned long pdev)
         (struct octeon_device_priv *)oct->priv;
 
     /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
-    for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) {
-        if (!(oct->io_qmask.oq & (1UL << q_no)))
+    for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
+        if (!(oct->io_qmask.oq & (1ULL << q_no)))
             continue;
         reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
                               MAX_PACKET_BUDGET);
@@ -245,8 +248,8 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
     do {
         pending_pkts = 0;
 
-        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
-            if (!(oct->io_qmask.oq & (1UL << i)))
+        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+            if (!(oct->io_qmask.oq & (1ULL << i)))
                 continue;
             pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
                                  oct->droq[i]);
@@ -365,7 +368,7 @@ static int wait_for_pending_requests(struct octeon_device *oct)
                     [OCTEON_ORDERED_SC_LIST].pending_req_count);
         if (pcount)
             schedule_timeout_uninterruptible(HZ / 10);
-        else
+        else
             break;
     }
 
@@ -396,10 +399,10 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
         dev_err(&oct->pci_dev->dev, "There were pending requests\n");
 
     /* Force all requests waiting to be fetched by OCTEON to complete. */
-    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
         struct octeon_instr_queue *iq;
 
-        if (!(oct->io_qmask.iq & (1UL << i)))
+        if (!(oct->io_qmask.iq & (1ULL << i)))
             continue;
         iq = oct->instr_queue[i];
 
@@ -409,7 +412,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
             iq->octeon_read_index = iq->host_write_index;
             iq->stats.instr_processed +=
                 atomic_read(&iq->instr_pending);
-            lio_process_iq_request_list(oct, iq);
+            lio_process_iq_request_list(oct, iq, 0);
             spin_unlock_bh(&iq->lock);
         }
     }
@@ -682,13 +685,24 @@ static inline void txqs_start(struct net_device *netdev)
  */
 static inline void txqs_wake(struct net_device *netdev)
 {
+    struct lio *lio = GET_LIO(netdev);
+
     if (netif_is_multiqueue(netdev)) {
         int i;
 
-        for (i = 0; i < netdev->num_tx_queues; i++)
-            if (__netif_subqueue_stopped(netdev, i))
+        for (i = 0; i < netdev->num_tx_queues; i++) {
+            int qno = lio->linfo.txpciq[i %
+                (lio->linfo.num_txpciq)].s.q_no;
+
+            if (__netif_subqueue_stopped(netdev, i)) {
+                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
+                              tx_restart, 1);
                 netif_wake_subqueue(netdev, i);
+            }
+        }
     } else {
+        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+                      tx_restart, 1);
         netif_wake_queue(netdev);
     }
 }
@@ -763,6 +777,8 @@ static inline int check_txq_status(struct lio *lio)
                 continue;
             if (__netif_subqueue_stopped(lio->netdev, q)) {
                 wake_q(lio->netdev, q);
+                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
+                              tx_restart, 1);
                 ret_val++;
             }
         }
@@ -770,6 +786,8 @@ static inline int check_txq_status(struct lio *lio)
         if (octnet_iq_is_full(lio->oct_dev, lio->txq))
             return 0;
         wake_q(lio->netdev, lio->txq);
+        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+                      tx_restart, 1);
         ret_val = 1;
     }
     return ret_val;
@@ -959,6 +977,42 @@ static inline void update_link_status(struct net_device *netdev,
     }
 }
 
+/* Runs in interrupt context. */
+static void update_txq_status(struct octeon_device *oct, int iq_num)
+{
+    struct net_device *netdev;
+    struct lio *lio;
+    struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
+
+    /*octeon_update_iq_read_idx(oct, iq);*/
+
+    netdev = oct->props[iq->ifidx].netdev;
+
+    /* This is needed because the first IQ does not have
+     * a netdev associated with it.
+     */
+    if (!netdev)
+        return;
+
+    lio = GET_LIO(netdev);
+    if (netif_is_multiqueue(netdev)) {
+        if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+            lio->linfo.link.s.link_up &&
+            (!octnet_iq_is_full(oct, iq_num))) {
+            INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
+                          tx_restart, 1);
+            netif_wake_subqueue(netdev, iq->q_index);
+        } else {
+            if (!octnet_iq_is_full(oct, lio->txq)) {
+                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+                              lio->txq,
+                              tx_restart, 1);
+                wake_q(netdev, lio->txq);
+            }
+        }
+    }
+}
+
 /**
  * \brief Droq packet processor sceduler
  * @param oct octeon device
@@ -972,8 +1026,9 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
     struct octeon_droq *droq;
 
     if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
-        for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
-            if (!(oct->droq_intr & (1 << oq_no)))
+        for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
+             oq_no++) {
+            if (!(oct->droq_intr & (1ULL << oq_no)))
                 continue;
 
             droq = oct->droq[oq_no];
@@ -1084,6 +1139,9 @@ static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         return -ENOMEM;
     }
 
+    oct_dev->rx_pause = 1;
+    oct_dev->tx_pause = 1;
+
     dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
 
     return 0;
@@ -1149,19 +1207,13 @@ static void octeon_destroy_resources(struct octeon_device *oct)
         if (oct->flags & LIO_FLAG_MSI_ENABLED)
             pci_disable_msi(oct->pci_dev);
 
-        /* Soft reset the octeon device before exiting */
-        oct->fn_list.soft_reset(oct);
-
-        /* Disable the device, releasing the PCI INT */
-        pci_disable_device(oct->pci_dev);
-
         /* fallthrough */
     case OCT_DEV_IN_RESET:
     case OCT_DEV_DROQ_INIT_DONE:
         /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
         mdelay(100);
-        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
-            if (!(oct->io_qmask.oq & (1UL << i)))
+        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+            if (!(oct->io_qmask.oq & (1ULL << i)))
                 continue;
             octeon_delete_droq(oct, i);
         }
@@ -1188,8 +1240,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 
     /* fallthrough */
     case OCT_DEV_INSTR_QUEUE_INIT_DONE:
-        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
-            if (!(oct->io_qmask.iq & (1UL << i)))
+        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+            if (!(oct->io_qmask.iq & (1ULL << i)))
                 continue;
             octeon_delete_instr_queue(oct, i);
         }
@@ -1201,11 +1253,18 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 
     /* fallthrough */
     case OCT_DEV_PCI_MAP_DONE:
+
+        /* Soft reset the octeon device before exiting */
+        oct->fn_list.soft_reset(oct);
+
         octeon_unmap_pci_barx(oct, 0);
         octeon_unmap_pci_barx(oct, 1);
 
         /* fallthrough */
     case OCT_DEV_BEGIN_STATE:
+        /* Disable the device, releasing the PCI INT */
+        pci_disable_device(oct->pci_dev);
+
         /* Nothing to be done here either */
         break;
     } /* end switch(oct->status) */
@@ -1245,6 +1304,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 {
     struct net_device *netdev = oct->props[ifidx].netdev;
     struct lio *lio;
+    struct napi_struct *napi, *n;
 
     if (!netdev) {
         dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
@@ -1261,6 +1321,13 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
     if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
         txqs_stop(netdev);
 
+    if (oct->props[lio->ifidx].napi_enabled == 1) {
+        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+            napi_disable(napi);
+
+        oct->props[lio->ifidx].napi_enabled = 0;
+    }
+
     if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
         unregister_netdev(netdev);
 
@@ -1288,6 +1355,10 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
         return 1;
     }
 
+    spin_lock_bh(&oct->cmd_resp_wqlock);
+    oct->cmd_resp_state = OCT_DRV_OFFLINE;
+    spin_unlock_bh(&oct->cmd_resp_wqlock);
+
     for (i = 0; i < oct->ifcount; i++) {
         lio = GET_LIO(oct->props[i].netdev);
         for (j = 0; j < lio->linfo.num_rxpciq; j++)
@@ -1336,6 +1407,7 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
 {
     u32 dev_id, rev_id;
     int ret = 1;
+    char *s;
 
     pci_read_config_dword(oct->pci_dev, 0, &dev_id);
     pci_read_config_dword(oct->pci_dev, 8, &rev_id);
@@ -1345,22 +1417,27 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
     case OCTEON_CN68XX_PCIID:
         oct->chip_id = OCTEON_CN68XX;
         ret = lio_setup_cn68xx_octeon_device(oct);
+        s = "CN68XX";
         break;
 
     case OCTEON_CN66XX_PCIID:
         oct->chip_id = OCTEON_CN66XX;
         ret = lio_setup_cn66xx_octeon_device(oct);
+        s = "CN66XX";
         break;
 
     default:
+        s = "?";
         dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
             dev_id);
     }
 
     if (!ret)
-        dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n",
+        dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
              OCTEON_MAJOR_REV(oct),
              OCTEON_MINOR_REV(oct),
-             octeon_get_conf(oct)->card_name);
+             octeon_get_conf(oct)->card_name,
+             LIQUIDIO_VERSION);
 
     return ret;
 }
@@ -1418,8 +1495,10 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
     if (octnet_iq_is_full(lio->oct_dev, iq))
         return 0;
 
-    if (__netif_subqueue_stopped(lio->netdev, q))
+    if (__netif_subqueue_stopped(lio->netdev, q)) {
+        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
         wake_q(lio->netdev, q);
+    }
     return 1;
 }
 
@@ -1733,6 +1812,7 @@ static int load_firmware(struct octeon_device *oct)
     if (ret) {
         dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
             fw_name);
+        release_firmware(fw);
         return ret;
     }
 
@@ -1802,6 +1882,9 @@ static void if_cfg_callback(struct octeon_device *oct,
         CVM_CAST64(resp->status));
     ACCESS_ONCE(ctx->cond) = 1;
 
+    snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
+         resp->cfg_info.liquidio_firmware_version);
+
     /* This barrier is required to be sure that the response has been
      * written fully before waking up the handler
      */
@@ -1848,6 +1931,7 @@ liquidio_push_packet(u32 octeon_id,
     struct sk_buff *skb = (struct sk_buff *)skbuff;
     struct skb_shared_hwtstamps *shhwtstamps;
     u64 ns;
+    u16 vtag = 0;
     struct net_device *netdev = (struct net_device *)arg;
     struct octeon_droq *droq = container_of(param, struct octeon_droq,
                         napi);
@@ -1924,6 +2008,16 @@ liquidio_push_packet(u32 octeon_id,
         else
             skb->ip_summed = CHECKSUM_NONE;
 
+        /* inbound VLAN tag */
+        if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+            (rh->r_dh.vlan != 0)) {
+            u16 vid = rh->r_dh.vlan;
+            u16 priority = rh->r_dh.priority;
+
+            vtag = priority << 13 | vid;
+            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+        }
+
         packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
 
         if (packet_was_received) {
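
The receive path above rebuilds the 16-bit 802.1Q tag control word from the
response header: the 3-bit priority (PCP) occupies bits 15-13 and the 12-bit
VLAN ID bits 11-0, which is exactly what `priority << 13 | vid` produces. An
illustrative helper (not driver code):

    #include <stdint.h>

    /* TCI layout: [15:13] PCP, [12] DEI (left zero here), [11:0] VID. */
    static uint16_t build_vlan_tci(uint16_t priority, uint16_t vid)
    {
        return (uint16_t)((priority << 13) | (vid & 0x0fff));
    }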
@@ -1977,39 +2071,6 @@ static void liquidio_napi_drv_callback(void *arg)
     }
 }
 
-/**
- * \brief Main NAPI poll function
- * @param droq octeon output queue
- * @param budget maximum number of items to process
- */
-static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget)
-{
-    int work_done;
-    struct lio *lio = GET_LIO(droq->napi.dev);
-    struct octeon_device *oct = lio->oct_dev;
-
-    work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
-                         POLL_EVENT_PROCESS_PKTS,
-                         budget);
-    if (work_done < 0) {
-        netif_info(lio, rx_err, lio->netdev,
-               "Receive work_done < 0, rxq:%d\n", droq->q_no);
-        goto octnet_napi_finish;
-    }
-
-    if (work_done > budget)
-        dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n",
-            __func__, work_done, budget);
-
-    return work_done;
-
-octnet_napi_finish:
-    napi_complete(&droq->napi);
-    octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR,
-                     0);
-    return 0;
-}
-
 /**
  * \brief Entry point for NAPI polling
  * @param napi NAPI structure
@@ -2019,19 +2080,41 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 {
     struct octeon_droq *droq;
     int work_done;
+    int tx_done = 0, iq_no;
+    struct octeon_instr_queue *iq;
+    struct octeon_device *oct;
 
     droq = container_of(napi, struct octeon_droq, napi);
+    oct = droq->oct_dev;
+    iq_no = droq->q_no;
+    /* Handle Droq descriptors */
+    work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+                         POLL_EVENT_PROCESS_PKTS,
+                         budget);
 
-    work_done = liquidio_napi_do_rx(droq, budget);
+    /* Flush the instruction queue */
+    iq = oct->instr_queue[iq_no];
+    if (iq) {
+        /* Process iq buffers with in the budget limits */
+        tx_done = octeon_flush_iq(oct, iq, 1, budget);
+        /* Update iq read-index rather than waiting for next interrupt.
+         * Return back if tx_done is false.
+         */
+        update_txq_status(oct, iq_no);
+        /*tx_done = (iq->flush_index == iq->octeon_read_index);*/
+    } else {
+        dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
+            __func__, iq_no);
+    }
 
-    if (work_done < budget) {
+    if ((work_done < budget) && (tx_done)) {
         napi_complete(napi);
         octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
                          POLL_EVENT_ENABLE_INTR, 0);
         return 0;
     }
 
-    return work_done;
+    return (!tx_done) ? (budget) : (work_done);
 }
 
 /**
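
The poll rework above follows the standard NAPI contract: a driver may only
call napi_complete() and re-enable interrupts when it did less than a full
budget of work. Because the handler now also flushes the instruction queue, it
reports the full budget whenever tx work remains (tx_done false), which keeps
the core polling instead of re-arming interrupts. Restated as a tiny decision
function (illustrative only, not driver code):

    /* Mirror of the completion rule in liquidio_napi_poll. */
    static int may_complete(int work_done, int budget, int tx_done)
    {
        /* Only complete when both rx and tx are drained;
         * otherwise the caller should report the full budget.
         */
        return work_done < budget && tx_done;
    }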
@@ -2165,6 +2248,14 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
                    &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
 }
 
+static inline void cleanup_tx_poll_fn(struct net_device *netdev)
+{
+    struct lio *lio = GET_LIO(netdev);
+
+    cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+    destroy_workqueue(lio->txq_status_wq.wq);
+}
+
 /**
  * \brief Net device open for LiquidIO
  * @param netdev network device
@@ -2175,17 +2266,22 @@ static int liquidio_open(struct net_device *netdev)
     struct octeon_device *oct = lio->oct_dev;
     struct napi_struct *napi, *n;
 
-    list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
-        napi_enable(napi);
+    if (oct->props[lio->ifidx].napi_enabled == 0) {
+        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+            napi_enable(napi);
+
+        oct->props[lio->ifidx].napi_enabled = 1;
+    }
 
     oct_ptp_open(netdev);
 
     ifstate_set(lio, LIO_IFSTATE_RUNNING);
 
     setup_tx_poll_fn(netdev);
 
     start_txq(netdev);
 
     netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
     try_module_get(THIS_MODULE);
 
     /* tell Octeon to start forwarding packets to host */
     send_rx_ctrl_cmd(lio, 1);
 
@@ -2205,39 +2301,35 @@ static int liquidio_open(struct net_device *netdev)
  */
 static int liquidio_stop(struct net_device *netdev)
 {
-    struct napi_struct *napi, *n;
     struct lio *lio = GET_LIO(netdev);
     struct octeon_device *oct = lio->oct_dev;
 
-    netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+    ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
     netif_tx_disable(netdev);
 
     /* Inform that netif carrier is down */
+    netif_carrier_off(netdev);
     lio->intf_open = 0;
     lio->linfo.link.s.link_up = 0;
     lio->link_changes++;
 
-    netif_carrier_off(netdev);
-
     /* Pause for a moment and wait for Octeon to flush out (to the wire) any
      * egress packets that are in-flight.
      */
     set_current_state(TASK_INTERRUPTIBLE);
     schedule_timeout(msecs_to_jiffies(100));
 
-    /* tell Octeon to stop forwarding packets to host */
+    /* Now it should be safe to tell Octeon that nic interface is down. */
     send_rx_ctrl_cmd(lio, 0);
 
-    cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
-    destroy_workqueue(lio->txq_status_wq.wq);
+    cleanup_tx_poll_fn(netdev);
 
     if (lio->ptp_clock) {
         ptp_clock_unregister(lio->ptp_clock);
         lio->ptp_clock = NULL;
     }
 
-    ifstate_reset(lio, LIO_IFSTATE_RUNNING);
-
-    /* This is a hack that allows DHCP to continue working. */
-    set_bit(__LINK_STATE_START, &lio->netdev->state);
-
-    list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
-        napi_disable(napi);
-
-    txqs_stop(netdev);
-
     dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
     module_put(THIS_MODULE);
 
@@ -2298,12 +2390,31 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
              netdev->name);
         break;
 
+    case OCTNET_CMD_ENABLE_VLAN_FILTER:
+        dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
+             netdev->name);
+        break;
+
+    case OCTNET_CMD_ADD_VLAN_FILTER:
+        dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
+             netdev->name, nctrl->ncmd.s.param1);
+        break;
+
+    case OCTNET_CMD_DEL_VLAN_FILTER:
+        dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
+             netdev->name, nctrl->ncmd.s.param1);
+        break;
+
     case OCTNET_CMD_SET_SETTINGS:
         dev_info(&oct->pci_dev->dev, "%s settings changed\n",
              netdev->name);
 
         break;
 
+    case OCTNET_CMD_SET_FLOW_CTL:
+        netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
+        break;
+
     default:
         dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
             nctrl->ncmd.s.cmd);
@@ -2898,6 +3009,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
     if (skb_shinfo(skb)->gso_size) {
         tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
         tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+        stats->tx_gso++;
     }
 
+    /* HW insert VLAN tag */
+    if (skb_vlan_tag_present(skb)) {
+        irh->priority = skb_vlan_tag_get(skb) >> 13;
+        irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
+    }
+
     xmit_more = skb->xmit_more;
@@ -2916,7 +3034,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
     netif_trans_update(netdev);
 
-    stats->tx_done++;
+    if (skb_shinfo(skb)->gso_size)
+        stats->tx_done += skb_shinfo(skb)->gso_segs;
+    else
+        stats->tx_done++;
     stats->tx_tot_bytes += skb->len;
 
     return NETDEV_TX_OK;
@@ -2948,6 +3069,61 @@ static void liquidio_tx_timeout(struct net_device *netdev)
     txqs_wake(netdev);
 }
 
+static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
+                    __be16 proto __attribute__((unused)),
+                    u16 vid)
+{
+    struct lio *lio = GET_LIO(netdev);
+    struct octeon_device *oct = lio->oct_dev;
+    struct octnic_ctrl_pkt nctrl;
+    int ret = 0;
+
+    memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+    nctrl.ncmd.u64 = 0;
+    nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+    nctrl.ncmd.s.param1 = vid;
+    nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+    nctrl.wait_time = 100;
+    nctrl.netpndev = (u64)netdev;
+    nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+    ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+    if (ret < 0) {
+        dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+            ret);
+    }
+
+    return ret;
+}
+
+static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
+                     __be16 proto __attribute__((unused)),
+                     u16 vid)
+{
+    struct lio *lio = GET_LIO(netdev);
+    struct octeon_device *oct = lio->oct_dev;
+    struct octnic_ctrl_pkt nctrl;
+    int ret = 0;
+
+    memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+    nctrl.ncmd.u64 = 0;
+    nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+    nctrl.ncmd.s.param1 = vid;
+    nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+    nctrl.wait_time = 100;
+    nctrl.netpndev = (u64)netdev;
+    nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+    ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+    if (ret < 0) {
+        dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+            ret);
+    }
+    return ret;
+}
+
 int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
 {
     struct lio *lio = GET_LIO(netdev);
@@ -3039,6 +3215,9 @@ static struct net_device_ops lionetdevops = {
     .ndo_set_mac_address    = liquidio_set_mac,
     .ndo_set_rx_mode        = liquidio_set_mcast_list,
     .ndo_tx_timeout         = liquidio_tx_timeout,
+
+    .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
+    .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
     .ndo_change_mtu         = liquidio_change_mtu,
     .ndo_do_ioctl           = liquidio_ioctl,
     .ndo_fix_features       = liquidio_fix_features,
@@ -3300,11 +3479,18 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
             | NETIF_F_LRO;
         netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
 
-        netdev->vlan_features = lio->dev_capability;
+        /* Add any unchangeable hw features */
+        lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
+                       NETIF_F_HW_VLAN_CTAG_RX |
+                       NETIF_F_HW_VLAN_CTAG_TX;
+
         netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
 
+        netdev->vlan_features = lio->dev_capability;
+
         netdev->hw_features = lio->dev_capability;
+        /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
+        netdev->hw_features = netdev->hw_features &
+            ~NETIF_F_HW_VLAN_CTAG_RX;
 
         /* Point to the properties for octeon device to which this
          * interface belongs.
@@ -3349,14 +3535,17 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 
         /* Register ethtool support */
         liquidio_set_ethtool_ops(netdev);
+        octeon_dev->priv_flags = 0x0;
 
         if (netdev->features & NETIF_F_LRO)
             liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
                          OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 
+        liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
+
         if ((debug != -1) && (debug & NETIF_MSG_HW))
-            liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE,
-                         0);
+            liquidio_set_feature(netdev,
+                         OCTNET_CMD_VERBOSE_ENABLE, 0);
 
         /* Register the network device with the OS */
         if (register_netdev(netdev)) {
@@ -3429,15 +3618,19 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
 
     /* Initialize interrupt moderation params */
     intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
-    intrmod_cfg->intrmod_enable = 1;
-    intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
-    intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
-    intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
-    intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER;
-    intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER;
-    intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER;
-    intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER;
-
+    intrmod_cfg->rx_enable = 1;
+    intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+    intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
+    intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
+    intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
+    intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
+    intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
+    intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
+    intrmod_cfg->tx_enable = 1;
+    intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
+    intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
+    intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+    intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
     dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
 
     return retval;
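
The interrupt-moderation setup above replaces the single shared intrmod_*
parameter set with independent rx_*/tx_* trigger sets, and seeds rx_frames and
rx_usecs from the output-queue interrupt settings in the device configuration.
A compressed sketch of the new shape (illustrative only, not the driver
struct; the full field list is in the oct_intrmod_cfg hunk later in this diff):

    /* Split moderation state: rx and tx tuned independently. */
    struct intrmod_sketch {
        unsigned long long rx_enable, tx_enable;
        unsigned long long rx_maxcnt, rx_mincnt; /* packet-count triggers */
        unsigned long long rx_maxtmr, rx_mintmr; /* timer triggers        */
        unsigned long long tx_maxcnt, tx_mincnt;
    };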
@@ -3500,6 +3693,7 @@ static void nic_starter(struct work_struct *work)
 static int octeon_device_init(struct octeon_device *octeon_dev)
 {
     int j, ret;
+    char bootcmd[] = "\n";
     struct octeon_device_priv *oct_priv =
         (struct octeon_device_priv *)octeon_dev->priv;
     atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
@@ -3611,14 +3805,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 
     dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
 
-    if (ddr_timeout == 0) {
-        dev_info(&octeon_dev->pci_dev->dev,
-             "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
-    }
+    if (ddr_timeout == 0)
+        dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+
     schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
 
     /* Wait for the octeon to initialize DDR after the soft-reset. */
+    while (ddr_timeout == 0) {
+        set_current_state(TASK_INTERRUPTIBLE);
+        if (schedule_timeout(HZ / 10)) {
+            /* user probably pressed Control-C */
+            return 1;
+        }
+    }
     ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
     if (ret) {
         dev_err(&octeon_dev->pci_dev->dev,
@@ -3632,6 +3831,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
         return 1;
     }
 
+    /* Divert uboot to take commands from host instead. */
+    ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
+
     dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
     ret = octeon_init_consoles(octeon_dev);
     if (ret) {
@@ -30,11 +30,10 @@
 
 #include "octeon_config.h"
 
-#define LIQUIDIO_VERSION        "1.1.9"
-#define LIQUIDIO_MAJOR_VERSION  1
-#define LIQUIDIO_MINOR_VERSION  1
-#define LIQUIDIO_MICRO_VERSION  9
-
+#define LIQUIDIO_BASE_VERSION   "1.4"
+#define LIQUIDIO_MICRO_VERSION  ".1"
+#define LIQUIDIO_PACKAGE ""
+#define LIQUIDIO_VERSION  "1.4.1"
 #define CONTROL_IQ 0
 /** Tag types used by Octeon cores in its work. */
 enum octeon_tag_type {
@@ -214,6 +213,10 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
 #define   OCTNET_CMD_VERBOSE_ENABLE   0x14
 #define   OCTNET_CMD_VERBOSE_DISABLE  0x15
 
+#define   OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
+#define   OCTNET_CMD_ADD_VLAN_FILTER  0x17
+#define   OCTNET_CMD_DEL_VLAN_FILTER  0x18
+
 /* RX(packets coming from wire) Checksum verification flags */
 /* TCP/UDP csum */
 #define   CNNIC_L4SUM_VERIFIED             0x1
@@ -482,15 +485,15 @@ struct octeon_instr_irh {
     u64 opcode:4;
     u64 rflag:1;
     u64 subcode:7;
-    u64 len:3;
-    u64 rid:13;
-    u64 reserved:4;
+    u64 vlan:12;
+    u64 priority:3;
+    u64 reserved:5;
     u64 ossp:32; /* opcode/subcode specific parameters */
 #else
     u64 ossp:32; /* opcode/subcode specific parameters */
-    u64 reserved:4;
-    u64 rid:13;
-    u64 len:3;
+    u64 reserved:5;
+    u64 priority:3;
+    u64 vlan:12;
     u64 subcode:7;
     u64 rflag:1;
     u64 opcode:4;
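
Note on the instruction-header reshuffle just above: the 3-bit len, 13-bit
request id (rid), and 4-bit reserved field give way to a 12-bit VLAN ID, 3-bit
priority, and 5-bit reserved field. Both layouts spend exactly 20 bits between
subcode and ossp, so the header stays one 64-bit word and the outer fields
keep their positions. A compile-time check of that arithmetic (illustrative
only, not driver code):

    /* Old middle: len(3) + rid(13) + reserved(4); new: vlan(12) +
     * priority(3) + reserved(5). Both must pack to the same 20 bits.
     */
    _Static_assert(3 + 13 + 4 == 12 + 3 + 5, "irh middle must stay 20 bits");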
@@ -517,28 +520,27 @@ union octeon_rh {
     struct {
         u64 opcode:4;
         u64 subcode:8;
-        u64 len:3;       /** additional 64-bit words */
-        u64 rid:13;      /** request id in response to pkt sent by host */
-        u64 reserved:4;
-        u64 ossp:32;     /** opcode/subcode specific parameters */
+        u64 len:3;       /** additional 64-bit words */
+        u64 reserved:17;
+        u64 ossp:32;     /** opcode/subcode specific parameters */
     } r;
     struct {
         u64 opcode:4;
         u64 subcode:8;
-        u64 len:3;       /** additional 64-bit words */
-        u64 rid:13;      /** request id in response to pkt sent by host */
-        u64 extra:24;
-        u64 link:8;
+        u64 len:3;       /** additional 64-bit words */
+        u64 extra:28;
+        u64 vlan:12;
+        u64 priority:3;
         u64 csum_verified:3;     /** checksum verified. */
         u64 has_hwtstamp:1;      /** Has hardware timestamp. 1 = yes. */
     } r_dh;
     struct {
         u64 opcode:4;
         u64 subcode:8;
-        u64 len:3;       /** additional 64-bit words */
-        u64 rid:13;      /** request id in response to pkt sent by host */
+        u64 len:3;       /** additional 64-bit words */
+        u64 reserved:11;
         u64 num_gmx_ports:8;
-        u64 max_nic_ports:8;
+        u64 max_nic_ports:10;
         u64 app_cap_flags:4;
         u64 app_mode:16;
     } r_core_drv_init;
@@ -554,8 +556,7 @@ union octeon_rh {
     u64 u64;
     struct {
         u64 ossp:32;  /** opcode/subcode specific parameters */
-        u64 reserved:4;
-        u64 rid:13;   /** req id in response to pkt sent by host */
+        u64 reserved:17;
         u64 len:3;    /** additional 64-bit words */
         u64 subcode:8;
         u64 opcode:4;
@@ -563,9 +564,9 @@ union octeon_rh {
     struct {
         u64 has_hwtstamp:1;      /** 1 = has hwtstamp */
         u64 csum_verified:3;     /** checksum verified. */
-        u64 link:8;
-        u64 extra:24;
-        u64 rid:13;   /** req id in response to pkt sent by host */
+        u64 priority:3;
+        u64 vlan:12;
+        u64 extra:28;
         u64 len:3;    /** additional 64-bit words */
         u64 subcode:8;
         u64 opcode:4;
@@ -573,9 +574,9 @@ union octeon_rh {
     struct {
         u64 app_mode:16;
         u64 app_cap_flags:4;
-        u64 max_nic_ports:8;
+        u64 max_nic_ports:10;
         u64 num_gmx_ports:8;
-        u64 rid:13;
+        u64 reserved:11;
         u64 len:3;    /** additional 64-bit words */
         u64 subcode:8;
         u64 opcode:4;
@@ -627,13 +628,13 @@ union oct_link_status {
         u64 speed:16;
         u64 link_up:1;
         u64 autoneg:1;
-        u64 interface:4;
+        u64 if_mode:5;
         u64 pause:1;
-        u64 reserved:17;
+        u64 reserved:16;
 #else
-        u64 reserved:17;
+        u64 reserved:16;
         u64 pause:1;
-        u64 interface:4;
+        u64 if_mode:5;
         u64 autoneg:1;
         u64 link_up:1;
         u64 speed:16;
@@ -710,6 +711,7 @@ struct liquidio_if_cfg_info {
     u64 iqmask; /** mask for IQs enabled for the port */
     u64 oqmask; /** mask for OQs enabled for the port */
     struct oct_link_info linfo; /** initial link information */
+    char liquidio_firmware_version[32];
 };
 
 /** Stats for each NIC port in RX direction. */
@@ -734,10 +736,16 @@ struct nic_rx_stats {
     u64 fw_err_pko;
     u64 fw_err_link;
     u64 fw_err_drop;
+
+    /* LRO */
     u64 fw_lro_pkts;   /* Number of packets that are LROed      */
     u64 fw_lro_octs;   /* Number of octets that are LROed       */
     u64 fw_total_lro;  /* Number of LRO packets formed          */
     u64 fw_lro_aborts; /* Number of times lRO of packet aborted */
+    u64 fw_lro_aborts_port;
+    u64 fw_lro_aborts_seq;
+    u64 fw_lro_aborts_tsval;
+    u64 fw_lro_aborts_timer;
     /* intrmod: packet forward rate */
     u64 fwd_rate;
 };
@@ -761,9 +769,13 @@ struct nic_tx_stats {
     /* firmware stats */
     u64 fw_total_sent;
     u64 fw_total_fwd;
+    u64 fw_total_fwd_bytes;
     u64 fw_err_pko;
     u64 fw_err_link;
     u64 fw_err_drop;
+    u64 fw_err_tso;
+    u64 fw_tso;     /* number of tso requests */
+    u64 fw_tso_fwd; /* number of packets segmented in tso */
 };
 
 struct oct_link_stats {
@@ -794,23 +806,44 @@ struct oct_mdio_cmd {
 
 #define OCT_LINK_STATS_SIZE     (sizeof(struct oct_link_stats))
 
+/* intrmod: max. packet rate threshold */
+#define LIO_INTRMOD_MAXPKT_RATETHR      196608
+/* intrmod: min. packet rate threshold */
+#define LIO_INTRMOD_MINPKT_RATETHR      9216
+/* intrmod: max. packets to trigger interrupt */
+#define LIO_INTRMOD_RXMAXCNT_TRIGGER    384
+/* intrmod: min. packets to trigger interrupt */
+#define LIO_INTRMOD_RXMINCNT_TRIGGER    1
+/* intrmod: max. time to trigger interrupt */
+#define LIO_INTRMOD_RXMAXTMR_TRIGGER    128
+/* 66xx:intrmod: min. time to trigger interrupt
+ * (value of 1 is optimum for TCP_RR)
+ */
+#define LIO_INTRMOD_RXMINTMR_TRIGGER    1
+
+/* intrmod: max. packets to trigger interrupt */
+#define LIO_INTRMOD_TXMAXCNT_TRIGGER    64
+/* intrmod: min. packets to trigger interrupt */
+#define LIO_INTRMOD_TXMINCNT_TRIGGER    0
+
+/* intrmod: poll interval in seconds */
 #define LIO_INTRMOD_CHECK_INTERVAL  1
-#define LIO_INTRMOD_MAXPKT_RATETHR  196608 /* max pkt rate threshold */
-#define LIO_INTRMOD_MINPKT_RATETHR  9216   /* min pkt rate threshold */
-#define LIO_INTRMOD_MAXCNT_TRIGGER  384    /* max pkts to trigger interrupt */
-#define LIO_INTRMOD_MINCNT_TRIGGER  1      /* min pkts to trigger interrupt */
-#define LIO_INTRMOD_MAXTMR_TRIGGER  128    /* max time to trigger interrupt */
-#define LIO_INTRMOD_MINTMR_TRIGGER  32     /* min time to trigger interrupt */
 
 struct oct_intrmod_cfg {
-    u64 intrmod_enable;
-    u64 intrmod_check_intrvl;
-    u64 intrmod_maxpkt_ratethr;
-    u64 intrmod_minpkt_ratethr;
-    u64 intrmod_maxcnt_trigger;
-    u64 intrmod_maxtmr_trigger;
-    u64 intrmod_mincnt_trigger;
-    u64 intrmod_mintmr_trigger;
+    u64 rx_enable;
+    u64 tx_enable;
+    u64 check_intrvl;
+    u64 maxpkt_ratethr;
+    u64 minpkt_ratethr;
+    u64 rx_maxcnt_trigger;
+    u64 rx_mincnt_trigger;
+    u64 rx_maxtmr_trigger;
+    u64 rx_mintmr_trigger;
+    u64 tx_mincnt_trigger;
+    u64 tx_maxcnt_trigger;
+    u64 rx_frames;
+    u64 tx_frames;
+    u64 rx_usecs;
 };
 
 #define BASE_QUEUE_NOT_REQUESTED 65535
@@ -37,7 +37,7 @@
 /* Maximum octeon devices defined as MAX_OCTEON_NICIF to support
  * multiple(<= MAX_OCTEON_NICIF) Miniports
  */
-#define   MAX_OCTEON_NICIF             32
+#define   MAX_OCTEON_NICIF             128
 #define   MAX_OCTEON_DEVICES           MAX_OCTEON_NICIF
 #define   MAX_OCTEON_LINKS             MAX_OCTEON_NICIF
 #define   MAX_OCTEON_MULTICAST_ADDR    32
@@ -135,7 +135,7 @@
 #define CFG_GET_IS_SLI_BP_ON(cfg)                ((cfg)->misc.enable_sli_oq_bp)
 
 /* Max IOQs per OCTEON Link */
-#define MAX_IOQS_PER_NICIF              32
+#define MAX_IOQS_PER_NICIF              64
 
 enum lio_card_type {
     LIO_210SV = 0, /* Two port, 66xx */
@@ -416,9 +416,11 @@ struct octeon_config {
 #define  DISPATCH_LIST_SIZE                      BIT(OPCODE_MASK_BITS)
 
 /* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_INSTR_QUEUES         CN6XXX_MAX_INPUT_QUEUES
+#define MAX_OCTEON_INSTR_QUEUES(oct)         CN6XXX_MAX_INPUT_QUEUES
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES(oct)        CN6XXX_MAX_OUTPUT_QUEUES
 
-/* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_OUTPUT_QUEUES        CN6XXX_MAX_OUTPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES     CN6XXX_MAX_INPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES    CN6XXX_MAX_OUTPUT_QUEUES
 
 #endif /* __OCTEON_CONFIG_H__ */
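
The queue-limit macros above now take the device pointer even though both
still expand to the CN6XXX constants: call sites become ready for per-chip
limits, while the new MAX_POSSIBLE_* constants keep sizing statically
allocated arrays (see the octeon_device struct hunks below). A sketch of where
this is headed, using a hypothetical per-device field (illustrative only):

    #define MAX_POSSIBLE_IQS  64                /* compile-time bound      */
    #define MAX_IQS(oct)      ((oct)->num_iqs)  /* num_iqs is hypothetical */

    struct octeon_sketch {
        int num_iqs;                          /* set per detected chip */
        void *instr_queue[MAX_POSSIBLE_IQS];  /* array keeps max size  */
    };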
@@ -549,17 +549,19 @@ static char *get_oct_app_string(u32 app_mode)
     return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
 }
 
+u8 fbuf[4 * 1024 * 1024];
+
 int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
                  size_t size)
 {
     int ret = 0;
-    u8 *p;
-    u8 *buffer;
+    u8 *p = fbuf;
     u32 crc32_result;
     u64 load_addr;
     u32 image_len;
     struct octeon_firmware_file_header *h;
-    u32 i;
+    u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION);
+    char *base;
 
     if (size < sizeof(struct octeon_firmware_file_header)) {
         dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
@@ -575,19 +577,26 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
         return -EINVAL;
     }
 
-    crc32_result =
-        crc32(~0, data,
-          sizeof(struct octeon_firmware_file_header) -
-          sizeof(u32)) ^ ~0U;
+    crc32_result = crc32((unsigned int)~0, data,
+                 sizeof(struct octeon_firmware_file_header) -
+                 sizeof(u32)) ^ ~0U;
     if (crc32_result != be32_to_cpu(h->crc32)) {
         dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
             crc32_result, be32_to_cpu(h->crc32));
         return -EINVAL;
     }
 
-    if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
-        dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
-            LIQUIDIO_VERSION, h->version);
+    if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
+        dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
+            LIQUIDIO_PACKAGE, h->version);
+        return -EINVAL;
+    }
+
+    base = h->version + strlen(LIQUIDIO_PACKAGE);
+    ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len);
+    if (ret) {
+        dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
+            LIQUIDIO_BASE_VERSION, base);
         return -EINVAL;
     }
 
@@ -601,56 +610,56 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
     snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
          h->version);
 
-    buffer = kmemdup(data, size, GFP_KERNEL);
-    if (!buffer)
-        return -ENOMEM;
-
-    p = buffer + sizeof(struct octeon_firmware_file_header);
+    data += sizeof(struct octeon_firmware_file_header);
 
+    dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
+         be32_to_cpu(h->num_images));
     /* load all images */
     for (i = 0; i < be32_to_cpu(h->num_images); i++) {
         load_addr = be64_to_cpu(h->desc[i].addr);
         image_len = be32_to_cpu(h->desc[i].len);
 
-        /* validate the image */
-        crc32_result = crc32(~0, p, image_len) ^ ~0U;
-        if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
-            dev_err(&oct->pci_dev->dev,
-                "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
-                i, crc32_result,
-                be32_to_cpu(h->desc[i].crc32));
-            ret = -EINVAL;
-            goto done_downloading;
+        dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
+             image_len, load_addr);
+
+        /* Write in 4MB chunks*/
+        rem = image_len;
+
+        while (rem) {
+            if (rem < (4 * 1024 * 1024))
+                size = rem;
+            else
+                size = 4 * 1024 * 1024;
+
+            memcpy(p, data, size);
+
+            /* download the image */
+            octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
+
+            data += size;
+            rem -= (u32)size;
+            load_addr += size;
         }
-
-        /* download the image */
-        octeon_pci_write_core_mem(oct, load_addr, p, image_len);
-
-        p += image_len;
-        dev_dbg(&oct->pci_dev->dev,
-            "Downloaded image %d (%d bytes) to address 0x%016llx\n",
-            i, image_len, load_addr);
     }
+    dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
+         h->bootcmd);
 
     /* Invoke the bootcmd */
     ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
 
-done_downloading:
-    kfree(buffer);
-
-    return ret;
+    return 0;
 }
 
 void octeon_free_device_mem(struct octeon_device *oct)
 {
     u32 i;
 
-    for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+    for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
         /* could check mask as well */
         vfree(oct->droq[i]);
     }
 
-    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+    for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
         /* could check mask as well */
         vfree(oct->instr_queue[i]);
     }
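
The firmware loader above no longer duplicates the whole image with
kmemdup(); it stages data through the static 4 MB fbuf buffer and writes each
image in chunks, advancing the source pointer and load address as it goes (the
per-image CRC check is dropped in the process). The chunk loop, restated
standalone (illustrative only; write_fn stands in for
octeon_pci_write_core_mem()):

    #include <string.h>

    #define CHUNK (4 * 1024 * 1024)

    static void download_image(unsigned long long addr,
                               const unsigned char *src, unsigned int len,
                               unsigned char *bounce,
                               void (*write_fn)(unsigned long long,
                                                const void *, unsigned int))
    {
        while (len) {
            unsigned int n = len < CHUNK ? len : CHUNK;

            memcpy(bounce, src, n);    /* stage one chunk */
            write_fn(addr, bounce, n); /* push it to device memory */
            src  += n;
            addr += n;
            len  -= n;
        }
    }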
@@ -734,7 +743,7 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
     octeon_device[oct_idx] = oct;
 
     oct->octeon_id = oct_idx;
-    snprintf((oct->device_name), sizeof(oct->device_name),
+    snprintf(oct->device_name, sizeof(oct->device_name),
          "LiquidIO%d", (oct->octeon_id));
 
     return oct;
@@ -1157,8 +1166,8 @@ core_drv_init_err:
 int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
 
 {
-    if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
-        (oct->io_qmask.iq & (1UL << q_no)))
+    if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
+        (oct->io_qmask.iq & (1ULL << q_no)))
         return oct->instr_queue[q_no]->max_count;
 
     return -1;
@@ -1166,8 +1175,8 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
 
 int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
 {
-    if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
-        (oct->io_qmask.oq & (1UL << q_no)))
+    if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
+        (oct->io_qmask.oq & (1ULL << q_no)))
         return oct->droq[q_no]->max_count;
     return -1;
 }
@@ -1258,10 +1267,10 @@ void lio_pci_writeq(struct octeon_device *oct,
 int octeon_mem_access_ok(struct octeon_device *oct)
 {
     u64 access_okay = 0;
+    u64 lmc0_reset_ctl;
 
     /* Check to make sure a DDR interface is enabled */
-    u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
-
+    lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
     access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
 
     return access_okay ? 0 : 1;
@@ -1275,9 +1284,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
     if (!timeout)
         return ret;
 
-    while (*timeout == 0)
-        schedule_timeout_uninterruptible(HZ / 10);
-
     for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
          ms += HZ / 10) {
         ret = octeon_mem_access_ok(oct);
@@ -152,9 +152,9 @@ struct octeon_mmio {
 #define   MAX_OCTEON_MAPS    32
 
 struct octeon_io_enable {
-    u32 iq;
-    u32 oq;
-    u32 iq64B;
+    u64 iq;
+    u64 oq;
+    u64 iq64B;
 };
 
 struct octeon_reg_list {
@@ -204,8 +204,7 @@ struct octeon_fn_list {
     void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
     void (*bar1_idx_write)(struct octeon_device *, u32, u32);
     u32 (*bar1_idx_read)(struct octeon_device *, u32);
-    u32 (*update_iq_read_idx)(struct octeon_device *,
-                  struct octeon_instr_queue *);
+    u32 (*update_iq_read_idx)(struct octeon_instr_queue *);
 
     void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
     void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
@@ -267,6 +266,7 @@ struct octdev_props {
     /* Each interface in the Octeon device has a network
      * device pointer (used for OS specific calls).
      */
+    int    napi_enabled;
     int    gmxport;
     struct net_device *netdev;
 };
@@ -325,7 +325,8 @@ struct octeon_device {
     struct octeon_sc_buffer_pool sc_buf_pool;
 
     /** The input instruction queues */
-    struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES];
+    struct octeon_instr_queue *instr_queue
+        [MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
 
     /** The doubly-linked list of instruction response */
     struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
@@ -333,7 +334,7 @@ struct octeon_device {
     u32 num_oqs;
 
     /** The DROQ output queues */
-    struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES];
+    struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];
 
     struct octeon_io_enable io_qmask;
 
@@ -382,15 +383,29 @@ struct octeon_device {
 
     struct cavium_wq dma_comp_wq;
 
-    struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES];
+    /** Lock for dma response list */
+    spinlock_t cmd_resp_wqlock;
+    u32 cmd_resp_state;
+
+    struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
 
     struct cavium_wk nic_poll_work;
 
     struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
 
     void *priv;
+
+    int rx_pause;
+    int tx_pause;
+
+    struct oct_link_stats link_stats; /*stastics from firmware*/
+
+    /* private flags to control driver-specific features through ethtool */
+    u32 priv_flags;
 };
 
 #define  OCT_DRV_ONLINE 1
 #define  OCT_DRV_OFFLINE 2
 #define  OCTEON_CN6XXX(oct)           ((oct->chip_id == OCTEON_CN66XX) || \
                                        (oct->chip_id == OCTEON_CN68XX))
 #define CHIP_FIELD(oct, TYPE, field)             \
@@ -647,4 +662,17 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
  */
 struct octeon_config *octeon_get_conf(struct octeon_device *oct);
 
+/* LiquidIO driver pivate flags */
+enum {
+    OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
+};
+
+static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag,
+                     u32 val)
+{
+    if (val)
+        octdev->priv_flags |= (0x1 << flag);
+    else
+        octdev->priv_flags &= ~(0x1 << flag);
+}
 #endif
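
The new priv_flags word and the lio_set_priv_flag() helper above follow the
usual set/clear-by-bit-position idiom. A standalone analog (illustrative
only, not driver code):

    #include <stdio.h>

    static unsigned int priv_flags;

    static void set_priv_flag(unsigned int flag, unsigned int val)
    {
        if (val)
            priv_flags |= (0x1u << flag);
        else
            priv_flags &= ~(0x1u << flag);
    }

    int main(void)
    {
        set_priv_flag(0, 1);                 /* e.g. OCT_PRIV_FLAG_TX_BYTES */
        printf("flags=0x%x\n", priv_flags);  /* flags=0x1 */
        set_priv_flag(0, 0);
        printf("flags=0x%x\n", priv_flags);  /* flags=0x0 */
        return 0;
    }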
@@ -337,7 +337,7 @@ int octeon_init_droq(struct octeon_device *oct,
     /* For 56xx Pass1, this function won't be called, so no checks. */
     oct->fn_list.setup_oq_regs(oct, q_no);
 
-    oct->io_qmask.oq |= (1 << q_no);
+    oct->io_qmask.oq |= (1ULL << q_no);
 
     return 0;
 }
@@ -65,6 +65,10 @@ struct oct_iq_stats {
     u64 tx_iq_busy;/**< Numof times this iq was found to be full. */
     u64 tx_dropped;/**< Numof pkts dropped dueto xmitpath errors. */
     u64 tx_tot_bytes;/**< Total count of bytes sento to network. */
+    u64 tx_gso;  /* count of tso */
+    u64 tx_dmamap_fail;
+    u64 tx_restart;
+    /*u64 tx_timeout_count;*/
 };
 
 #define OCT_IQ_STATS_SIZE   (sizeof(struct oct_iq_stats))
@@ -80,6 +84,12 @@ struct octeon_instr_queue {
     /** A spinlock to protect access to the input ring. */
     spinlock_t lock;
 
+    /** A spinlock to protect while posting on the ring. */
+    spinlock_t post_lock;
+
+    /** A spinlock to protect access to the input ring.*/
+    spinlock_t iq_flush_running_lock;
+
     /** Flag that indicates if the queue uses 64 byte commands. */
     u32 iqcmd_64B:1;
 
@@ -244,7 +254,7 @@ union octeon_instr_64B {
 
 /** The size of each buffer in soft command buffer pool
 */
-#define  SOFT_COMMAND_BUFFER_SIZE	1024
+#define  SOFT_COMMAND_BUFFER_SIZE	1536
 
 struct octeon_soft_command {
     /** Soft command buffer info. */
@@ -282,7 +292,7 @@ struct octeon_soft_command {
 
 /** Maximum number of buffers to allocate into soft command buffer pool
 */
-#define  MAX_SOFT_COMMAND_BUFFERS	16
+#define  MAX_SOFT_COMMAND_BUFFERS	256
 
 /** Head of a soft command buffer pool.
 */
@@ -339,7 +349,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
 
 int
 lio_process_iq_request_list(struct octeon_device *oct,
-                struct octeon_instr_queue *iq);
+                struct octeon_instr_queue *iq, u32 napi_budget);
 
 int octeon_send_command(struct octeon_device *oct, u32 iq_no,
             u32 force_db, void *cmd, void *buf,
@@ -357,5 +367,7 @@ int octeon_send_soft_command(struct octeon_device *oct,
 int octeon_setup_iq(struct octeon_device *oct, int ifidx,
             int q_index, union oct_txpciq iq_no, u32 num_descs,
             void *app_ctx);
-
+int
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+        u32 pending_thresh, u32 napi_budget);
 #endif /* __OCTEON_IQ_H__ */
@@ -30,6 +30,17 @@
#include <linux/dma-mapping.h>
#include <linux/ptp_clock_kernel.h>

struct oct_nic_stats_resp {
	u64 rh;
	struct oct_link_stats stats;
	u64 status;
};

struct oct_nic_stats_ctrl {
	struct completion complete;
	struct net_device *netdev;
};

/** LiquidIO per-interface network private data */
struct lio {
	/** State of the interface. Rx/Tx happens only in the RUNNING state. */

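oct_nic_stats_ctrl pairs a completion with the requesting netdev, the usual kernel pattern for a blocking firmware query. A hedged sketch of how a caller might wait on it; the function name, the elided posting step, and the timeout value are all illustrative, not this series' actual code:

/* Illustrative only: block until the stats response callback fires. */
static int lio_fetch_link_stats(struct oct_nic_stats_ctrl *ctrl)
{
	init_completion(&ctrl->complete);

	/* ...post the soft command whose response callback calls
	 * complete(&ctrl->complete) when the firmware answers...
	 */

	if (!wait_for_completion_timeout(&ctrl->complete,
					 msecs_to_jiffies(500)))
		return -ETIMEDOUT;	/* timeout value illustrative */
	return 0;
}
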
@@ -171,20 +171,36 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
	int retval;
	struct octeon_soft_command *sc = NULL;

	spin_lock_bh(&oct->cmd_resp_wqlock);
	/* Allow only rx ctrl command to stop traffic on the chip
	 * during offline operations
	 */
	if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) &&
	    (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) {
		spin_unlock_bh(&oct->cmd_resp_wqlock);
		dev_err(&oct->pci_dev->dev,
			"%s cmd:%d not processed since driver offline\n",
			__func__, nctrl->ncmd.s.cmd);
		return -1;
	}

	sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
			__func__);
		spin_unlock_bh(&oct->cmd_resp_wqlock);
		return -1;
	}

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct, sc);
		dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
			__func__, retval);
		dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n",
			__func__, nctrl->ncmd.s.cmd, retval);
		spin_unlock_bh(&oct->cmd_resp_wqlock);
		return -1;
	}

	spin_unlock_bh(&oct->cmd_resp_wqlock);
	return retval;
}

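The cmd_resp_state gate means teardown paths can still quiesce traffic: shutdown code would mark the device OFFLINE under cmd_resp_wqlock and then issue the one command the gate still admits, OCTNET_CMD_RX_CTL. A sketch of that caller side; the helper name is hypothetical and the call shape is simplified:

/* Illustrative teardown-side counterpart (helper name hypothetical,
 * control-packet setup simplified).
 */
static void lio_go_offline(struct octeon_device *oct,
			   struct octnic_ctrl_pkt *nctrl)
{
	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	/* RX_CTL is the only command the gate still lets through. */
	nctrl->ncmd.s.cmd = OCTNET_CMD_RX_CTL;
	/* ...fill in the remaining nctrl fields, then send... */
	octnet_send_nic_ctrl_pkt(oct, nctrl);
}
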
@@ -51,7 +51,7 @@ struct iq_post_status {
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

@@ -149,8 +149,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);
	spin_lock_init(&iq->post_lock);

	oct->io_qmask.iq |= (1 << iq_no);
	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= (1ULL << iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);

@@ -253,8 +256,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct)
		instr_cnt = 0;

		/*for (i = 0; i < oct->num_iqs; i++) {*/
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
			if (!(oct->io_qmask.iq & (1UL << i)))
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & (1ULL << i)))
				continue;
			pending =
				atomic_read(&oct->

@@ -391,13 +394,13 @@ __add_to_request_list(struct octeon_instr_queue *iq,

int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq)
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned pkts_compl = 0, bytes_compl = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;

@@ -457,6 +460,9 @@ lio_process_iq_request_list(struct octeon_device *oct,
 skip_this:
		inst_count++;
		INCR_INDEX_BY1(old, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,

@@ -466,38 +472,63 @@ lio_process_iq_request_list(struct octeon_device *oct,
	return inst_count;
}

static inline void
update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh, u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	/* Calculate how many commands Octeon has read and move the read index
	 * accordingly.
	 */
	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);
	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	/* Move the NORESPONSE requests to the per-device completion list. */
	if (iq->flush_index != iq->octeon_read_index)
		inst_processed = lio_process_iq_request_list(oct, iq);
	spin_lock_bh(&iq->lock);

	if (inst_processed) {
		atomic_sub(inst_processed, &iq->instr_pending);
		iq->stats.instr_processed += inst_processed;
	}
}
	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

static void
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh)
{
	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		spin_lock_bh(&iq->lock);
		update_iq_indices(oct, iq);
		spin_unlock_bh(&iq->lock);
		do {
			/* Process any outstanding IQ packets. */
			if (iq->flush_index == iq->octeon_read_index)
				break;

			if (napi_budget)
				inst_processed = lio_process_iq_request_list
					(oct, iq,
					 napi_budget - tot_inst_processed);
			else
				inst_processed =
					lio_process_iq_request_list(oct, iq, 0);

			if (inst_processed) {
				atomic_sub(inst_processed, &iq->instr_pending);
				iq->stats.instr_processed += inst_processed;
			}

			tot_inst_processed += inst_processed;
			inst_processed = 0;

		} while (tot_inst_processed < napi_budget);

		if (napi_budget && (tot_inst_processed >= napi_budget))
			tx_done = 0;
	}

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

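octeon_flush_iq now returns tx_done so a NAPI poll handler can tell whether the tx side exhausted its budget. A minimal sketch of the intended call pattern; the function name and all driver plumbing are illustrative, not this series' actual poll handler:

/* Illustrative only: how a NAPI poll handler would consume the new
 * return value (rx processing and interrupt re-enable elided).
 */
static int lio_poll_sketch(struct octeon_device *oct,
			   struct octeon_instr_queue *iq,
			   struct napi_struct *napi, int budget)
{
	int rx_done = 0;	/* ...rx work against 'budget' elided... */

	/* pending_thresh = 1: flush whenever anything is outstanding */
	int tx_done = octeon_flush_iq(oct, iq, 1, budget);

	if (rx_done < budget && tx_done)
		napi_complete(napi);	/* both directions idle: stop polling */

	return rx_done;
}
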
static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

@@ -508,24 +539,17 @@ static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
	if (!iq)
		return;

	/* Return immediately if no work is pending. */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout, do nothing. */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Get the lock and prevent tasklets. This routine gets called from
	 * the poll thread. Instructions can now be posted in tasklet context.
	 */
	spin_lock_bh(&iq->lock);
	if (iq->fill_cnt != 0)
		ring_doorbell(oct, iq);

	spin_unlock_bh(&iq->lock);

	/* Flush the instruction queue */
	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 1);
	octeon_flush_iq(oct, iq, 1, 0);
}

/* Called by the Poll thread at regular intervals to check the instruction

@@ -550,7 +574,10 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	spin_lock_bh(&iq->lock);
	/* Get the lock and prevent other tasks and tx interrupt handler from
	 * running.
	 */
	spin_lock_bh(&iq->post_lock);

	st = __post_command2(oct, iq, force_db, cmd);

@@ -566,10 +593,13 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->lock);
	spin_unlock_bh(&iq->post_lock);

	if (iq->do_auto_flush)
		octeon_flush_iq(oct, iq, 2);
	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */
	/*if (iq->do_auto_flush)*/
	/*	octeon_flush_iq(oct, iq, 2, 0);*/

	return st.status;
}

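The switch from iq->lock to iq->post_lock here completes the lock split introduced earlier in the series: posting and reaping now serialize on different locks, so a CPU queuing a new command no longer blocks the CPU freeing completed ones. A schematic sketch of the resulting pattern; both function names are placeholders for the post and flush paths shown in this diff:

/* Illustrative only: the two sides of the split-lock scheme. */
static void post_path(struct octeon_instr_queue *iq)
{
	spin_lock_bh(&iq->post_lock);	/* producers: posting commands */
	/* ...copy command into the ring, ring doorbell... */
	spin_unlock_bh(&iq->post_lock);
}

static void flush_path(struct octeon_instr_queue *iq)
{
	spin_lock_bh(&iq->lock);	/* reapers: completion side */
	/* ...advance flush_index, free completed requests... */
	spin_unlock_bh(&iq->lock);
}
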
@@ -54,6 +54,7 @@ int octeon_setup_response_list(struct octeon_device *oct)
		spin_lock_init(&oct->response_list[i].lock);
		atomic_set(&oct->response_list[i].pending_req_count, 0);
	}
	spin_lock_init(&oct->cmd_resp_wqlock);

	oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
	if (!oct->dma_comp_wq.wq) {

@@ -64,6 +65,7 @@ int octeon_setup_response_list(struct octeon_device *oct)
	cwq = &oct->dma_comp_wq;
	INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
	cwq->wk.ctxptr = oct;
	oct->cmd_resp_state = OCT_DRV_ONLINE;
	queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));

	return ret;

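The response list is serviced by a self-rearming delayed work item: the handler runs, then requeues itself, giving a steady 100 ms poll cadence. A generic sketch of that pattern under the setup above; the real oct_poll_req_completion body is not part of this diff, and the context-recovery details are assumptions:

/* Generic self-rearming delayed work matching the setup above
 * (body and context plumbing illustrative).
 */
static void oct_poll_req_completion_sketch(struct work_struct *work)
{
	struct cavium_wk *wk = container_of(work, struct cavium_wk, work.work);
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	struct cavium_wq *cwq = &oct->dma_comp_wq;

	/* ...walk the response lists, complete finished requests... */

	queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
}
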