Govindarajulu Varadarajan says:

====================
The following patch adds multi TX queue support to the enic driver.

Signed-off-by: Nishank Trivedi <nistrive@cisco.com>
Signed-off-by: Christian Benvenuti <benve@cisco.com>
Signed-off-by: Govindarajulu Varadarajan <govindarajulu90@gmail.com>
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2013-09-05 12:39:39 -04:00
Parents: 3c3769e633 001e1c1d5e
Commit: c3de991feb
5 changed files, 54 insertions, 19 deletions
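At a high level, the series moves enic onto the stack's standard multiqueue TX model: size the netdev for several queues at allocation time, report how many are actually usable, and stop/wake individual subqueues instead of the whole device. A minimal, driver-agnostic sketch of that model (struct example_priv and the fixed count of 8 are illustrative, not enic code):

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	struct example_priv {			/* hypothetical private data */
		unsigned int wq_count;
	};

	static struct net_device *example_alloc_netdev(void)
	{
		struct net_device *netdev;

		/* Prototype is alloc_etherdev_mqs(sizeof_priv, txqs, rxqs);
		 * this sizes the netdev for up to 8 TX and 8 RX queues.
		 */
		netdev = alloc_etherdev_mqs(sizeof(struct example_priv), 8, 8);
		if (!netdev)
			return NULL;

		/* Once the usable counts are known, trim what the stack
		 * actually schedules (enic does this in enic_probe()).
		 */
		netif_set_real_num_tx_queues(netdev, 8);
		netif_set_real_num_rx_queues(netdev, 8);

		return netdev;
	}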

--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2076,7 +2076,8 @@ F: drivers/usb/chipidea/
 CISCO VIC ETHERNET NIC DRIVER
 M:	Christian Benvenuti <benve@cisco.com>
 M:	Roopa Prabhu <roprabhu@cisco.com>
 M:	Sujith Sankar <ssujith@cisco.com>
+M:	Govindarajulu Varadarajan <govindarajulu90@gmail.com>
 M:	Neel Patel <neepatel@cisco.com>
 M:	Nishank Trivedi <nistrive@cisco.com>
 S:	Supported

--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,12 +32,12 @@
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"2.1.1.43"
+#define DRV_VERSION		"2.1.1.50"
 #define DRV_COPYRIGHT		"Copyright 2008-2013 Cisco Systems, Inc"

 #define ENIC_BARS_MAX		6

-#define ENIC_WQ_MAX		1
+#define ENIC_WQ_MAX		8
 #define ENIC_RQ_MAX		8
 #define ENIC_CQ_MAX		(ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX		(ENIC_CQ_MAX + 2)
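With ENIC_WQ_MAX raised to 8 alongside ENIC_RQ_MAX at 8, ENIC_CQ_MAX now evaluates to 16 (one completion queue per work or receive queue) and ENIC_INTR_MAX to 18; the +2 covers the driver's two extra MSI-X vectors (error and notification).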

--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -128,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 		completed_index, enic_wq_free_buf,
 		opaque);

-	if (netif_queue_stopped(enic->netdev) &&
+	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
 	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
 	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
-		netif_wake_queue(enic->netdev);
+		netif_wake_subqueue(enic->netdev, q_number);

 	spin_unlock(&enic->wq_lock[q_number]);
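This is the completion-side half of per-queue flow control: wake only the subqueue whose ring drained rather than the whole device. The pattern in isolation (a sketch; example_ring_space() is a placeholder for the driver's real descriptor-availability check):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static unsigned int example_ring_space(struct net_device *dev, u16 qidx)
	{
		return 64;	/* placeholder: a real driver reads ring state */
	}

	static void example_tx_complete(struct net_device *dev, u16 qidx)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

		/* Wake this subqueue only once a worst-case skb (all frags
		 * plus any descriptor splits) is guaranteed to fit again.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    example_ring_space(dev, qidx) >= MAX_SKB_FRAGS + 1)
			netif_wake_subqueue(dev, qidx);
	}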
@@ -292,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
 static irqreturn_t enic_isr_msix_wq(int irq, void *data)
 {
 	struct enic *enic = data;
-	unsigned int cq = enic_cq_wq(enic, 0);
-	unsigned int intr = enic_msix_wq_intr(enic, 0);
+	unsigned int cq;
+	unsigned int intr;
 	unsigned int wq_work_to_do = -1; /* no limit */
 	unsigned int wq_work_done;
+	unsigned int wq_irq;
+
+	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
+	cq = enic_cq_wq(enic, wq_irq);
+	intr = enic_msix_wq_intr(enic, wq_irq);

 	wq_work_done = vnic_cq_service(&enic->cq[cq],
 		wq_work_to_do, enic_wq_service, NULL);
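Where this handler could previously hard-code queue 0, it now recovers the queue index from the IRQ number itself, relying on the WQ MSI-X vectors being allocated contiguously in enic->msix_entry[]. The arithmetic in isolation (a sketch):

	/* With per-queue MSI-X vectors allocated contiguously, the queue
	 * index is the offset of this irq from the first WQ vector.
	 */
	static unsigned int example_irq_to_wq(int irq, int first_wq_vector)
	{
		return (unsigned int)irq - (unsigned int)first_wq_vector;
	}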
@@ -511,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	struct net_device *netdev)
 {
 	struct enic *enic = netdev_priv(netdev);
-	struct vnic_wq *wq = &enic->wq[0];
+	struct vnic_wq *wq;
 	unsigned long flags;
+	unsigned int txq_map;

 	if (skb->len <= 0) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}

+	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+	wq = &enic->wq[txq_map];
+
 	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
 	 * which is very likely.  In the off chance it's going to take
 	 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb.
@@ -531,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}

-	spin_lock_irqsave(&enic->wq_lock[0], flags);
+	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);

 	if (vnic_wq_desc_avail(wq) <
 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
 		/* This is a hard error, log it */
 		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
-		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 		return NETDEV_TX_BUSY;
 	}

 	enic_queue_wq_skb(enic, wq, skb);

 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));

-	spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);

 	return NETDEV_TX_OK;
 }
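On the transmit side, the queue chosen by the stack (skb_get_queue_mapping()) is folded modulo enic->wq_count, so a valid WQ is indexed even if fewer queues are configured than the netdev advertises, and the same txq_map then selects the matching lock and netdev_queue. The overall shape of such a handler (a sketch reusing example_priv and example_ring_space() from the sketches above):

	static netdev_tx_t example_xmit(struct sk_buff *skb,
					struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);
		unsigned int q = skb_get_queue_mapping(skb) % priv->wq_count;

		/* ...queue the skb on ring q under that ring's lock... */

		/* Stop just this subqueue when a worst-case packet no
		 * longer fits; the completion path wakes it later.
		 */
		if (example_ring_space(dev, q) < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(netdev_get_tx_queue(dev, q));

		return NETDEV_TX_OK;
	}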
@@ -1025,6 +1034,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 		skb_put(skb, bytes_written);
 		skb->protocol = eth_type_trans(skb, netdev);
 		skb_record_rx_queue(skb, q_number);
+		if (netdev->features & NETIF_F_RXHASH) {
+			skb->rxhash = rss_hash;
+			if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+					NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+					NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
+				skb->l4_rxhash = true;
+		}
+
 		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
 			skb->csum = htons(checksum);
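Passing the hardware RSS result up via skb->rxhash spares the stack a software hash for RPS/RFS and socket flow steering; setting skb->l4_rxhash only for the TCP hash types records that ports were part of the hash. Later kernels express the same thing in one call, roughly:

	skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);	/* PKT_HASH_TYPE_L3 for non-TCP types */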
@@ -1369,7 +1386,7 @@ static int enic_open(struct net_device *netdev)
 	enic_set_rx_mode(netdev);

-	netif_wake_queue(netdev);
+	netif_tx_wake_all_queues(netdev);

 	for (i = 0; i < enic->rq_count; i++)
 		napi_enable(&enic->napi[i]);
@@ -2032,7 +2049,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * instance data is initialized to zero.
 	 */

-	netdev = alloc_etherdev(sizeof(struct enic));
+	netdev = alloc_etherdev_mqs(sizeof(struct enic),
+				    ENIC_RQ_MAX, ENIC_WQ_MAX);
 	if (!netdev)
 		return -ENOMEM;
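alloc_etherdev_mqs() only fixes the upper bound on queue counts baked into the netdev; what the stack actually uses is trimmed later with the netif_set_real_num_*_queues() calls added further down in probe. Note the prototype is alloc_etherdev_mqs(sizeof_priv, txqs, rxqs), so this call passes ENIC_RQ_MAX in the TX slot; with both maxima at 8 the result is the same either way.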
@@ -2062,11 +2080,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_master(pdev);

 	/* Query PCI controller on system for DMA addressing
-	 * limitation for the device.  Try 40-bit first, and
+	 * limitation for the device.  Try 64-bit first, and
 	 * fail to 32-bit.
 	 */

-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err) {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
@@ -2080,10 +2098,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_release_regions;
 		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 		if (err) {
 			dev_err(dev, "Unable to obtain %u-bit DMA "
-				"for consistent allocations, aborting\n", 40);
+				"for consistent allocations, aborting\n", 64);
 			goto err_out_release_regions;
 		}
 		using_dac = 1;
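Both DMA hunks follow the usual widest-first policy, now at 64-bit: set the streaming mask, then the matching coherent mask, falling back to 32-bit if either fails. Later kernels collapse the pair into a single call; roughly (a sketch using enic_probe()'s existing err and label):

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		goto err_out_release_regions;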
@@ -2198,6 +2216,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_dev_close;
 	}

+	netif_set_real_num_tx_queues(netdev, enic->wq_count);
+	netif_set_real_num_rx_queues(netdev, enic->rq_count);
+
 	/* Setup notification timer, HW reset task, and wq locks
 	 */
@@ -2246,6 +2267,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ENIC_SETTING(enic, TSO))
 		netdev->hw_features |= NETIF_F_TSO |
 			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+	if (ENIC_SETTING(enic, RSS))
+		netdev->hw_features |= NETIF_F_RXHASH;
 	if (ENIC_SETTING(enic, RXCSUM))
 		netdev->hw_features |= NETIF_F_RXCSUM;

--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -175,6 +175,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
 {
 	return vdev->res[type].count;
 }
+EXPORT_SYMBOL(vnic_dev_get_res_count);

 void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
 	unsigned int index)
@@ -193,6 +194,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
 		return (char __iomem *)vdev->res[type].vaddr;
 	}
 }
+EXPORT_SYMBOL(vnic_dev_get_res);

 static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
 	unsigned int desc_count, unsigned int desc_size)
@@ -942,6 +944,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
 		kfree(vdev);
 	}
 }
+EXPORT_SYMBOL(vnic_dev_unregister);

 struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
 	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
@@ -969,6 +972,13 @@ err_out:
 	vnic_dev_unregister(vdev);
 	return NULL;
 }
+EXPORT_SYMBOL(vnic_dev_register);
+
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
+{
+	return vdev->pdev;
+}
+EXPORT_SYMBOL(vnic_dev_get_pdev);

 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
 {
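The EXPORT_SYMBOL() additions, together with the new vnic_dev_get_pdev() accessor, let other kernel modules that share the VIC low-level code call into vnic_dev. A hypothetical consumer (RES_TYPE_WQ is the WQ resource type from vnic_resource.h):

	#include <linux/pci.h>
	#include "vnic_dev.h"
	#include "vnic_resource.h"

	static void example_dump(struct vnic_dev *vdev)
	{
		struct pci_dev *pdev = vnic_dev_get_pdev(vdev);
		unsigned int wqs = vnic_dev_get_res_count(vdev, RES_TYPE_WQ);

		dev_info(&pdev->dev, "device has %u work queues\n", wqs);
	}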

--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -127,6 +127,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
 struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
 	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
 	unsigned int num_bars);
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev);
 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
 int vnic_dev_enable2(struct vnic_dev *vdev, int active);
 int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);