Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Several fixups, of note:

  1) Fix unlock of not held spinlock in RXRPC code, from Alexey
     Khoroshilov.

  2) Call pci_disable_device() from the correct shutdown path in bnx2x
     driver, from Yuval Mintz.

  3) Fix qeth build on s390 for some configurations, from Eugene
     Crosser.

  4) Cure locking bugs in bond_loadbalance_arp_mon(), from Ding
     Tianhong.

  5) Must do netif_napi_add() before registering netdevice in sky2
     driver, from Stanislaw Gruszka.

  6) Fix lost bug fix during merge due to code movement in ieee802154,
     noticed and fixed by the eagle eyed Stephen Rothwell.

  7) Get rid of resource leak in xen-netfront driver, from Annie Li.

  8) Bounds checks in qlcnic driver are off by one, from Manish Chopra.

  9) TPROXY can leak sockets when TCP early demux is enabled, fix from
     Holger Eitzenberger"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (32 commits)
  qeth: fix build of s390 allmodconfig
  bonding: fix locking in bond_loadbalance_arp_mon()
  tun: add device name(iff) field to proc fdinfo entry
  DT: net: davinci_emac: "ti,davinci-no-bd-ram" property is actually optional
  DT: net: davinci_emac: "ti,davinci-rmii-en" property is actually optional
  bnx2x: Fix generic option settings
  net: Fix warning on make htmldocs caused by skbuff.c
  llc: remove noisy WARN from llc_mac_hdr_init
  qlcnic: Fix loopback test failure
  qlcnic: Fix tx timeout.
  qlcnic: Fix initialization of vlan list.
  qlcnic: Correct off-by-one errors in bounds checks
  net: Document promote_secondaries
  net: gre: use icmp_hdr() to get inner ip header
  i40e: Add missing braces to i40e_dcb_need_reconfig()
  xen-netfront: fix resource leak in netfront
  net: 6lowpan: fixup for code movement
  hyperv: Add support for physically discontinuous receive buffer
  sky2: initialize napi before registering device
  net: Fix memory leak if TPROXY used with TCP early demux
  ...
Commit 1d494f36d1
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -10,8 +10,6 @@ Required properties:
 - ti,davinci-ctrl-mod-reg-offset: offset to control module register
 - ti,davinci-ctrl-ram-offset: offset to control module ram
 - ti,davinci-ctrl-ram-size: size of control module ram
-- ti,davinci-rmii-en: use RMII
-- ti,davinci-no-bd-ram: has the emac controller BD RAM
 - interrupts: interrupt mapping for the davinci emac interrupts sources:
               4 sources: <Receive Threshold Interrupt
                           Receive Interrupt
@@ -22,6 +20,8 @@ Optional properties:
 - phy-handle: Contains a phandle to an Ethernet PHY.
               If absent, davinci_emac driver defaults to 100/FULL.
 - local-mac-address : 6 bytes, mac address
+- ti,davinci-rmii-en: 1 byte, 1 means use RMII
+- ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
 
 Example (enbw_cmc board):
 	eth0: emac@1e20000 {
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
@@ -1088,6 +1088,12 @@ igmpv3_unsolicited_report_interval - INTEGER
 	IGMPv3 report retransmit will take place.
 	Default: 1000 (1 seconds)
 
+promote_secondaries - BOOLEAN
+	When a primary IP address is removed from this interface
+	promote a corresponding secondary IP address instead of
+	removing all the corresponding secondary IP addresses.
+
+
 tag - INTEGER
 	Allows you to write a number, which can be used as required.
 	Default value is 0.
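The promote_secondaries knob documented above is a per-interface sysctl under /proc/sys/net/ipv4/conf/. A minimal sketch of turning it on from C follows; the interface name "eth0" and the direct procfs write are illustrative assumptions, and "sysctl -w net.ipv4.conf.eth0.promote_secondaries=1" is equivalent:

	#include <stdio.h>

	/* Illustrative only: enable promote_secondaries for one interface
	 * by writing its procfs node ("eth0" is an assumed example name).
	 */
	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/conf/eth0/promote_secondaries", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("1\n", f);
		return fclose(f) == 0 ? 0 : 1;
	}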
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
@@ -583,6 +583,7 @@ Currently implemented fanout policies are:
   - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
   - PACKET_FANOUT_RND: schedule to socket by random selection
   - PACKET_FANOUT_ROLLOVER: if one socket is full, rollover to another
+  - PACKET_FANOUT_QM: schedule to socket by skbs recorded queue_mapping
 
 Minimal example code by David S. Miller (try things like "./test eth0 hash",
 "./test eth0 lb", etc.):
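As background for the policy list above: a process joins a fanout group with setsockopt(PACKET_FANOUT) on an AF_PACKET socket, with the group id in the low 16 bits of the integer argument and the policy in the high 16 bits. A minimal sketch, assuming CAP_NET_RAW and an arbitrary group id of 42:

	#include <stdio.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <linux/if_packet.h>
	#include <linux/if_ether.h>

	int main(void)
	{
		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		/* low 16 bits: group id (42 here); high 16 bits: policy */
		int fanout_arg = 42 | (PACKET_FANOUT_CPU << 16);

		if (fd < 0) {
			perror("socket");	/* needs CAP_NET_RAW */
			return 1;
		}
		if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
			       &fanout_arg, sizeof(fanout_arg)) < 0) {
			perror("setsockopt(PACKET_FANOUT)");
			return 1;
		}
		return 0;
	}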
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
@@ -209,7 +209,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 {
 	int i;
 	int pagecount;
-	unsigned long long pfn;
 	struct vmbus_channel_gpadl_header *gpadl_header;
 	struct vmbus_channel_gpadl_body *gpadl_body;
 	struct vmbus_channel_msginfo *msgheader;
@@ -219,7 +218,6 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
 
 	pagecount = size >> PAGE_SHIFT;
-	pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;
 
 	/* do we need a gpadl body msg */
 	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -248,7 +246,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 		gpadl_header->range[0].byte_offset = 0;
 		gpadl_header->range[0].byte_count = size;
 		for (i = 0; i < pfncount; i++)
-			gpadl_header->range[0].pfn_array[i] = pfn+i;
+			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
 		*msginfo = msgheader;
 		*messagecount = 1;
 
@@ -301,7 +300,9 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 			 * so the hypervisor gurantees that this is ok.
 			 */
 			for (i = 0; i < pfncurr; i++)
-				gpadl_body->pfn[i] = pfn + pfnsum + i;
+				gpadl_body->pfn[i] = slow_virt_to_phys(
+					kbuffer + PAGE_SIZE * (pfnsum + i)) >>
+					PAGE_SHIFT;
 
 			/* add to msg header */
 			list_add_tail(&msgbody->msglistentry,
@@ -327,7 +328,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 		gpadl_header->range[0].byte_offset = 0;
 		gpadl_header->range[0].byte_count = size;
 		for (i = 0; i < pagecount; i++)
-			gpadl_header->range[0].pfn_array[i] = pfn+i;
+			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
+				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
 
 		*msginfo = msgheader;
 		*messagecount = 1;
@@ -344,7 +346,7 @@ nomem:
  * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer
  *
  * @channel: a channel
- * @kbuffer: from kmalloc
+ * @kbuffer: from kmalloc or vmalloc
  * @size: page-size multiple
 * @gpadl_handle: some funky thing
 */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
@@ -2346,7 +2346,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
 					    arp_work.work);
 	struct slave *slave, *oldcurrent;
 	struct list_head *iter;
-	int do_failover = 0;
+	int do_failover = 0, slave_state_changed = 0;
 
 	if (!bond_has_slaves(bond))
 		goto re_arm;
@@ -2370,7 +2370,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
 		    bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
 
 			slave->link = BOND_LINK_UP;
-			bond_set_active_slave(slave);
+			slave_state_changed = 1;
 
 			/* primary_slave has no meaning in round-robin
 			 * mode. the window of a slave being up and
@@ -2399,7 +2399,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
 		    !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
 
 			slave->link = BOND_LINK_DOWN;
-			bond_set_backup_slave(slave);
+			slave_state_changed = 1;
 
 			if (slave->link_failure_count < UINT_MAX)
 				slave->link_failure_count++;
@@ -2426,19 +2426,24 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
 
 	rcu_read_unlock();
 
-	if (do_failover) {
-		/* the bond_select_active_slave must hold RTNL
-		 * and curr_slave_lock for write.
-		 */
+	if (do_failover || slave_state_changed) {
 		if (!rtnl_trylock())
 			goto re_arm;
-		block_netpoll_tx();
-		write_lock_bh(&bond->curr_slave_lock);
 
-		bond_select_active_slave(bond);
+		if (slave_state_changed) {
+			bond_slave_state_change(bond);
+		} else if (do_failover) {
+			/* the bond_select_active_slave must hold RTNL
+			 * and curr_slave_lock for write.
+			 */
+			block_netpoll_tx();
+			write_lock_bh(&bond->curr_slave_lock);
 
-		write_unlock_bh(&bond->curr_slave_lock);
-		unblock_netpoll_tx();
+			bond_select_active_slave(bond);
+
+			write_unlock_bh(&bond->curr_slave_lock);
+			unblock_netpoll_tx();
+		}
 		rtnl_unlock();
 	}
 
@@ -2599,45 +2604,51 @@ do_failover:
 
 /*
  * Send ARP probes for active-backup mode ARP monitor.
- *
- * Called with rcu_read_lock hold.
  */
-static void bond_ab_arp_probe(struct bonding *bond)
+static bool bond_ab_arp_probe(struct bonding *bond)
 {
 	struct slave *slave, *before = NULL, *new_slave = NULL,
-		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+		     *curr_arp_slave, *curr_active_slave;
 	struct list_head *iter;
 	bool found = false;
 
-	read_lock(&bond->curr_slave_lock);
+	rcu_read_lock();
+	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+	curr_active_slave = rcu_dereference(bond->curr_active_slave);
 
-	if (curr_arp_slave && bond->curr_active_slave)
+	if (curr_arp_slave && curr_active_slave)
 		pr_info("PROBE: c_arp %s && cas %s BAD\n",
 			curr_arp_slave->dev->name,
-			bond->curr_active_slave->dev->name);
+			curr_active_slave->dev->name);
 
-	if (bond->curr_active_slave) {
-		bond_arp_send_all(bond, bond->curr_active_slave);
-		read_unlock(&bond->curr_slave_lock);
-		return;
+	if (curr_active_slave) {
+		bond_arp_send_all(bond, curr_active_slave);
+		rcu_read_unlock();
+		return true;
 	}
 
-	read_unlock(&bond->curr_slave_lock);
+	rcu_read_unlock();
 
 	/* if we don't have a curr_active_slave, search for the next available
 	 * backup slave from the current_arp_slave and make it the candidate
 	 * for becoming the curr_active_slave
 	 */
 
+	if (!rtnl_trylock())
+		return false;
+	/* curr_arp_slave might have gone away */
+	curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave);
+
 	if (!curr_arp_slave) {
-		curr_arp_slave = bond_first_slave_rcu(bond);
-		if (!curr_arp_slave)
-			return;
+		curr_arp_slave = bond_first_slave(bond);
+		if (!curr_arp_slave) {
+			rtnl_unlock();
+			return true;
+		}
 	}
 
 	bond_set_slave_inactive_flags(curr_arp_slave);
 
-	bond_for_each_slave_rcu(bond, slave, iter) {
+	bond_for_each_slave(bond, slave, iter) {
 		if (!found && !before && IS_UP(slave->dev))
 			before = slave;
 
@@ -2667,21 +2678,26 @@ static void bond_ab_arp_probe(struct bonding *bond)
 	if (!new_slave && before)
 		new_slave = before;
 
-	if (!new_slave)
-		return;
+	if (!new_slave) {
+		rtnl_unlock();
+		return true;
+	}
 
 	new_slave->link = BOND_LINK_BACK;
 	bond_set_slave_active_flags(new_slave);
 	bond_arp_send_all(bond, new_slave);
 	new_slave->jiffies = jiffies;
 	rcu_assign_pointer(bond->current_arp_slave, new_slave);
+	rtnl_unlock();
+
+	return true;
 }
 
 static void bond_activebackup_arp_mon(struct work_struct *work)
 {
 	struct bonding *bond = container_of(work, struct bonding,
 					    arp_work.work);
-	bool should_notify_peers = false;
+	bool should_notify_peers = false, should_commit = false;
 	int delta_in_ticks;
 
 	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
@@ -2690,12 +2706,11 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
 		goto re_arm;
 
 	rcu_read_lock();
-
 	should_notify_peers = bond_should_notify_peers(bond);
+	should_commit = bond_ab_arp_inspect(bond);
+	rcu_read_unlock();
 
-	if (bond_ab_arp_inspect(bond)) {
-		rcu_read_unlock();
-
+	if (should_commit) {
 		/* Race avoidance with bond_close flush of workqueue */
 		if (!rtnl_trylock()) {
 			delta_in_ticks = 1;
@@ -2704,13 +2719,14 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
 		}
 
 		bond_ab_arp_commit(bond);
 
 		rtnl_unlock();
-		rcu_read_lock();
 	}
 
-	bond_ab_arp_probe(bond);
-	rcu_read_unlock();
+	if (!bond_ab_arp_probe(bond)) {
+		/* rtnl locking failed, re-arm */
+		delta_in_ticks = 1;
+		should_notify_peers = false;
+	}
 
 re_arm:
 	if (bond->params.arp_interval)
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
@@ -303,6 +303,19 @@ static inline void bond_set_backup_slave(struct slave *slave)
 	}
 }
 
+static inline void bond_slave_state_change(struct bonding *bond)
+{
+	struct list_head *iter;
+	struct slave *tmp;
+
+	bond_for_each_slave(bond, tmp, iter) {
+		if (tmp->link == BOND_LINK_UP)
+			bond_set_active_slave(tmp);
+		else if (tmp->link == BOND_LINK_DOWN)
+			bond_set_backup_slave(tmp);
+	}
+}
+
 static inline int bond_slave_state(struct slave *slave)
 {
 	return slave->backup;
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
@@ -212,7 +212,6 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
 	int neX000, ctron;
 #endif
 	static unsigned version_printed;
-	struct ei_device *ei_local = netdev_priv(dev);
 
 	if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
 		netdev_info(dev, version);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -358,49 +358,47 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
 	cfg_idx = bnx2x_get_link_cfg_idx(bp);
 	old_multi_phy_config = bp->link_params.multi_phy_config;
-	switch (cmd->port) {
-	case PORT_TP:
-		if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
-			break; /* no port change */
-
-		if (!(bp->port.supported[0] & SUPPORTED_TP ||
-		      bp->port.supported[1] & SUPPORTED_TP)) {
-			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
-			return -EINVAL;
-		}
-		bp->link_params.multi_phy_config &=
-			~PORT_HW_CFG_PHY_SELECTION_MASK;
-		if (bp->link_params.multi_phy_config &
-		    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
-			bp->link_params.multi_phy_config |=
-				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
-		else
-			bp->link_params.multi_phy_config |=
-				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
-		break;
-	case PORT_FIBRE:
-	case PORT_DA:
-		if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
-			break; /* no port change */
-
-		if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
-		      bp->port.supported[1] & SUPPORTED_FIBRE)) {
-			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
-			return -EINVAL;
-		}
-		bp->link_params.multi_phy_config &=
-			~PORT_HW_CFG_PHY_SELECTION_MASK;
-		if (bp->link_params.multi_phy_config &
-		    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
-			bp->link_params.multi_phy_config |=
-				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
-		else
-			bp->link_params.multi_phy_config |=
-				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
-		break;
-	default:
-		DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
-		return -EINVAL;
-	}
+	if (cmd->port != bnx2x_get_port_type(bp)) {
+		switch (cmd->port) {
+		case PORT_TP:
+			if (!(bp->port.supported[0] & SUPPORTED_TP ||
+			      bp->port.supported[1] & SUPPORTED_TP)) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "Unsupported port type\n");
+				return -EINVAL;
+			}
+			bp->link_params.multi_phy_config &=
+				~PORT_HW_CFG_PHY_SELECTION_MASK;
+			if (bp->link_params.multi_phy_config &
+			    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+				bp->link_params.multi_phy_config |=
+					PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+			else
+				bp->link_params.multi_phy_config |=
+					PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+			break;
+		case PORT_FIBRE:
+		case PORT_DA:
+			if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+			      bp->port.supported[1] & SUPPORTED_FIBRE)) {
+				DP(BNX2X_MSG_ETHTOOL,
+				   "Unsupported port type\n");
+				return -EINVAL;
+			}
+			bp->link_params.multi_phy_config &=
+				~PORT_HW_CFG_PHY_SELECTION_MASK;
+			if (bp->link_params.multi_phy_config &
+			    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+				bp->link_params.multi_phy_config |=
+					PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+			else
+				bp->link_params.multi_phy_config |=
+					PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+			break;
+		default:
+			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
+			return -EINVAL;
+		}
+	}
 	/* Save new config in case command complete successfully */
 	new_multi_phy_config = bp->link_params.multi_phy_config;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13102,9 +13102,9 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 
 		if (atomic_read(&pdev->enable_cnt) == 1)
 			pci_release_regions(pdev);
-	}
 
-	pci_disable_device(pdev);
+		pci_disable_device(pdev);
+	}
 }
 
 static void bnx2x_remove_one(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4440,9 +4440,10 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
 	/* Check if APP Table has changed */
 	if (memcmp(&new_cfg->app,
 		   &old_cfg->app,
-		   sizeof(new_cfg->app)))
+		   sizeof(new_cfg->app))) {
 		need_reconfig = true;
 		dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+	}
 
 	return need_reconfig;
 }
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
@@ -5020,6 +5020,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	}
 
+	netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+
 	err = register_netdev(dev);
 	if (err) {
 		dev_err(&pdev->dev, "cannot register net device\n");
@@ -5028,8 +5030,6 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	netif_carrier_off(dev);
 
-	netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
-
 	sky2_show_addr(dev);
 
 	if (hw->ports > 1) {
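The reordering above matters because register_netdev() makes the device visible: it can be opened, and its interrupt and poll paths exercised, as soon as the call returns. Initializing NAPI only afterwards leaves a window in which sky2_poll could be scheduled on an uninitialized napi_struct; moving netif_napi_add() before registration closes that window.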
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -683,12 +683,17 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
 		adapter->ahw->linkup = 0;
 		netif_carrier_off(netdev);
 	} else if (!adapter->ahw->linkup && linkup) {
-		/* Do not advertise Link up if the port is in loopback mode */
-		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
-			adapter->ahw->linkup = 1;
+		adapter->ahw->linkup = 1;
+
+		/* Do not advertise Link up to the stack if device
+		 * is in loopback mode
+		 */
+		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
+			netdev_info(netdev, "NIC Link is up for loopback test\n");
+			return;
+		}
 
 		netdev_info(netdev, "NIC Link is up\n");
-		adapter->ahw->linkup = 1;
 		netif_carrier_on(netdev);
 	}
 }
@@ -1150,13 +1155,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
 	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
 	u32 seq_number;
 
-	if (unlikely(ring > adapter->max_rds_rings))
+	if (unlikely(ring >= adapter->max_rds_rings))
 		return NULL;
 
 	rds_ring = &recv_ctx->rds_rings[ring];
 
 	index = qlcnic_get_lro_sts_refhandle(sts_data0);
-	if (unlikely(index > rds_ring->num_desc))
+	if (unlikely(index >= rds_ring->num_desc))
 		return NULL;
 
 	buffer = &rds_ring->rx_buf_arr[index];
@@ -1662,13 +1667,13 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
 	u16 vid = 0xffff;
 	int err;
 
-	if (unlikely(ring > adapter->max_rds_rings))
+	if (unlikely(ring >= adapter->max_rds_rings))
 		return NULL;
 
 	rds_ring = &recv_ctx->rds_rings[ring];
 
 	index = qlcnic_83xx_hndl(sts_data[0]);
-	if (unlikely(index > rds_ring->num_desc))
+	if (unlikely(index >= rds_ring->num_desc))
 		return NULL;
 
 	buffer = &rds_ring->rx_buf_arr[index];
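The two process_lro hunks above are the "off by one" fixes called out in the merge summary: ring indexes rds_rings[] and index indexes rx_buf_arr[], both 0-based, so the last valid value is the array size minus one and the reject test must be ">=" rather than ">". A hedged restatement of the pattern, outside the driver:

	/* Not driver code: valid indexes into an array of "size" entries
	 * are 0 .. size - 1. The old test "idx > size" wrongly accepted
	 * idx == size, one element past the end of the array.
	 */
	static int index_is_valid(unsigned int idx, unsigned int size)
	{
		return idx < size;	/* i.e. reject idx >= size */
	}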
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1837,6 +1837,7 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 	qlcnic_linkevent_request(adapter, 1);
 
 	adapter->ahw->reset_context = 0;
+	netif_tx_start_all_queues(netdev);
 	return 0;
 }
 
@@ -2704,14 +2705,8 @@ static int qlcnic_open(struct net_device *netdev)
 
 	err = __qlcnic_up(adapter, netdev);
 	if (err)
-		goto err_out;
+		qlcnic_detach(adapter);
 
-	netif_tx_start_all_queues(netdev);
-
-	return 0;
-
-err_out:
-	qlcnic_detach(adapter);
 	return err;
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -448,8 +448,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
 	return 0;
 }
 
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
-				   struct qlcnic_info *info)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 	struct qlcnic_cmd_args cmd;
@@ -495,10 +494,6 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
 	if (err)
 		return -EIO;
 
-	err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
-	if (err)
-		return err;
-
 	if (qlcnic_83xx_get_port_info(adapter))
 		return -EIO;
 
@@ -555,6 +550,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 	if (err)
 		goto err_out_send_channel_term;
 
+	err = qlcnic_sriov_get_vf_acl(adapter);
+	if (err)
+		goto err_out_send_channel_term;
+
 	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
 	if (err)
 		goto err_out_send_channel_term;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1524,9 +1524,9 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
 					     priv->dev->dev_addr, 0);
 		if (!is_valid_ether_addr(priv->dev->dev_addr))
 			eth_hw_addr_random(priv->dev);
+		pr_info("%s: device MAC address %pM\n", priv->dev->name,
+			priv->dev->dev_addr);
 	}
-	pr_warn("%s: device MAC address %pM\n", priv->dev->name,
-		priv->dev->dev_addr);
 }
 
 /**
@@ -1635,7 +1635,7 @@ static int stmmac_hw_setup(struct net_device *dev)
 	stmmac_mmc_setup(priv);
 
 	ret = stmmac_init_ptp(priv);
-	if (ret)
+	if (ret && ret != -EOPNOTSUPP)
 		pr_warn("%s: failed PTP initialisation\n", __func__);
 
 #ifdef CONFIG_STMMAC_DEBUG_FS
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
@@ -462,7 +462,7 @@ struct nvsp_message {
 
 #define NETVSC_MTU 65536
 
-#define NETVSC_RECEIVE_BUFFER_SIZE		(1024*1024*2)	/* 2MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE		(1024*1024*16)	/* 16MB */
 
 #define NETVSC_RECEIVE_BUFFER_ID		0xcafe
 
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
@@ -136,8 +136,7 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
 
 	if (net_device->recv_buf) {
 		/* Free up the receive buffer */
-		free_pages((unsigned long)net_device->recv_buf,
-			get_order(net_device->recv_buf_size));
+		vfree(net_device->recv_buf);
 		net_device->recv_buf = NULL;
 	}
 
@@ -163,9 +162,7 @@ static int netvsc_init_recv_buf(struct hv_device *device)
 		return -ENODEV;
 	ndev = net_device->ndev;
 
-	net_device->recv_buf =
-		(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
-				get_order(net_device->recv_buf_size));
+	net_device->recv_buf = vzalloc(net_device->recv_buf_size);
 	if (!net_device->recv_buf) {
 		netdev_err(ndev, "unable to allocate receive "
 			"buffer of size %d\n", net_device->recv_buf_size);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
@@ -69,6 +69,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
+#include <linux/seq_file.h>
 
 #include <asm/uaccess.h>
 
@@ -2228,6 +2229,27 @@ static int tun_chr_close(struct inode *inode, struct file *file)
 	return 0;
 }
 
+#ifdef CONFIG_PROC_FS
+static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
+{
+	struct tun_struct *tun;
+	struct ifreq ifr;
+
+	memset(&ifr, 0, sizeof(ifr));
+
+	rtnl_lock();
+	tun = tun_get(f);
+	if (tun)
+		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+	rtnl_unlock();
+
+	if (tun)
+		tun_put(tun);
+
+	return seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
+}
+#endif
+
 static const struct file_operations tun_fops = {
 	.owner	= THIS_MODULE,
 	.llseek = no_llseek,
@@ -2242,7 +2264,10 @@ static const struct file_operations tun_fops = {
 #endif
 	.open	= tun_chr_open,
 	.release = tun_chr_close,
-	.fasync = tun_chr_fasync
+	.fasync = tun_chr_fasync,
+#ifdef CONFIG_PROC_FS
+	.show_fdinfo = tun_chr_show_fdinfo,
+#endif
 };
 
 static struct miscdevice tun_miscdev = {
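With the tun hunks applied, reading /proc/<pid>/fdinfo/<fd> for a tun/tap file descriptor now includes a line of the form "iff:	tun0" (per the seq_printf() format above), letting tools map a file descriptor back to its network device without needing ioctl access to the fd.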
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
@@ -117,6 +117,7 @@ struct netfront_info {
 	} tx_skbs[NET_TX_RING_SIZE];
 	grant_ref_t gref_tx_head;
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+	struct page *grant_tx_page[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
 
 	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
@@ -396,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
 			gnttab_release_grant_reference(
 				&np->gref_tx_head, np->grant_tx_ref[id]);
 			np->grant_tx_ref[id] = GRANT_INVALID_REF;
+			np->grant_tx_page[id] = NULL;
 			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 			dev_kfree_skb_irq(skb);
 		}
@@ -452,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
+		np->grant_tx_page[id] = virt_to_page(data);
 		tx->gref = np->grant_tx_ref[id] = ref;
 		tx->offset = offset;
 		tx->size = len;
@@ -497,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 							np->xbdev->otherend_id,
 							mfn, GNTMAP_readonly);
 
+			np->grant_tx_page[id] = page;
 			tx->gref = np->grant_tx_ref[id] = ref;
 			tx->offset = offset;
 			tx->size = bytes;
@@ -596,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	mfn = virt_to_mfn(data);
 	gnttab_grant_foreign_access_ref(
 		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+	np->grant_tx_page[id] = virt_to_page(data);
 	tx->gref = np->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
@@ -1085,10 +1090,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 			continue;
 
 		skb = np->tx_skbs[i].skb;
-		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
-					      GNTMAP_readonly);
-		gnttab_release_grant_reference(&np->gref_tx_head,
-					       np->grant_tx_ref[i]);
+		get_page(np->grant_tx_page[i]);
+		gnttab_end_foreign_access(np->grant_tx_ref[i],
+					  GNTMAP_readonly,
+					  (unsigned long)page_address(np->grant_tx_page[i]));
+		np->grant_tx_page[i] = NULL;
 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
 		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
 		dev_kfree_skb_irq(skb);
@@ -1097,78 +1103,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 
 static void xennet_release_rx_bufs(struct netfront_info *np)
 {
-	struct mmu_update *mmu = np->rx_mmu;
-	struct multicall_entry *mcl = np->rx_mcl;
-	struct sk_buff_head free_list;
-	struct sk_buff *skb;
-	unsigned long mfn;
-	int xfer = 0, noxfer = 0, unused = 0;
 	int id, ref;
 
-	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
-			 __func__);
-	return;
-
-	skb_queue_head_init(&free_list);
-
 	spin_lock_bh(&np->rx_lock);
 
 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
-		ref = np->grant_rx_ref[id];
-		if (ref == GRANT_INVALID_REF) {
-			unused++;
-			continue;
-		}
+		struct sk_buff *skb;
+		struct page *page;
 
 		skb = np->rx_skbs[id];
-		mfn = gnttab_end_foreign_transfer_ref(ref);
-		gnttab_release_grant_reference(&np->gref_rx_head, ref);
+		if (!skb)
+			continue;
 
-		if (0 == mfn) {
-			skb_shinfo(skb)->nr_frags = 0;
-			dev_kfree_skb(skb);
-			noxfer++;
-			continue;
-		}
+		ref = np->grant_rx_ref[id];
+		if (ref == GRANT_INVALID_REF)
+			continue;
 
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Remap the page. */
-			const struct page *page =
-				skb_frag_page(&skb_shinfo(skb)->frags[0]);
-			unsigned long pfn = page_to_pfn(page);
-			void *vaddr = page_address(page);
+		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 
-			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
-						mfn_pte(mfn, PAGE_KERNEL),
-						0);
-			mcl++;
-			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
-				| MMU_MACHPHYS_UPDATE;
-			mmu->val = pfn;
-			mmu++;
+		/* gnttab_end_foreign_access() needs a page ref until
+		 * foreign access is ended (which may be deferred).
+		 */
+		get_page(page);
+		gnttab_end_foreign_access(ref, 0,
+					  (unsigned long)page_address(page));
+		np->grant_rx_ref[id] = GRANT_INVALID_REF;
 
-			set_phys_to_machine(pfn, mfn);
-		}
-		__skb_queue_tail(&free_list, skb);
-		xfer++;
+		kfree_skb(skb);
 	}
 
-	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
-		 __func__, xfer, noxfer, unused);
-
-	if (xfer) {
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Do all the remapping work and M2P updates. */
-			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
-					 NULL, DOMID_SELF);
-			mcl++;
-			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
-		}
-	}
-
-	__skb_queue_purge(&free_list);
-
 	spin_unlock_bh(&np->rx_lock);
 }
 
@@ -1339,6 +1302,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		np->rx_skbs[i] = NULL;
 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
+		np->grant_tx_page[i] = NULL;
 	}
 
 	/* A grant for every tx ring slot */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
@@ -738,6 +738,8 @@ struct qeth_discipline {
 	int (*freeze)(struct ccwgroup_device *);
 	int (*thaw) (struct ccwgroup_device *);
 	int (*restore)(struct ccwgroup_device *);
+	int (*control_event_handler)(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd);
 };
 
 struct qeth_vlan_vid {
@@ -948,13 +950,10 @@ int qeth_query_card_info(struct qeth_card *card,
 int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
 	int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
 	void *reply_param);
-void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd);
-void qeth_bridgeport_query_support(struct qeth_card *card);
 int qeth_bridgeport_query_ports(struct qeth_card *card,
 	enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
 int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
-void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
 int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
 int qeth_get_elements_for_frags(struct sk_buff *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
@@ -69,6 +69,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
+EXPORT_SYMBOL_GPL(qeth_wq);
 
 static void qeth_close_dev_handler(struct work_struct *work)
 {
@@ -616,15 +617,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
 			qeth_schedule_recovery(card);
 			return NULL;
 		case IPA_CMD_SETBRIDGEPORT:
-			if (cmd->data.sbp.hdr.command_code ==
-			    IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
-				qeth_bridge_state_change(card, cmd);
-				return NULL;
-			} else
-				return cmd;
 		case IPA_CMD_ADDRESS_CHANGE_NOTIF:
-			qeth_bridge_host_event(card, cmd);
-			return NULL;
+			if (card->discipline->control_event_handler
+								(card, cmd))
+				return cmd;
+			else
+				return NULL;
 		case IPA_CMD_MODCCID:
 			return cmd;
 		case IPA_CMD_REGISTER_LOCAL_ADDR:
@@ -4973,10 +4971,6 @@ retriable:
 		qeth_query_setadapterparms(card);
 	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
 		qeth_query_setdiagass(card);
-	qeth_bridgeport_query_support(card);
-	if (card->options.sbp.supported_funcs)
-		dev_info(&card->gdev->dev,
-		"The device represents a HiperSockets Bridge Capable Port\n");
 	return 0;
 out:
 	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
@@ -33,6 +33,11 @@ static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
 			   unsigned long));
 static void qeth_l2_set_multicast_list(struct net_device *);
 static int qeth_l2_recover(void *);
+static void qeth_bridgeport_query_support(struct qeth_card *card);
+static void qeth_bridge_state_change(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd);
+static void qeth_bridge_host_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd);
 
 static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
@@ -989,6 +994,10 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 		rc = -ENODEV;
 		goto out_remove;
 	}
+	qeth_bridgeport_query_support(card);
+	if (card->options.sbp.supported_funcs)
+		dev_info(&card->gdev->dev,
+		"The device represents a HiperSockets Bridge Capable Port\n");
 	qeth_trace_features(card);
 
 	if (!card->dev && qeth_l2_setup_netdev(card)) {
@@ -1233,6 +1242,26 @@ out:
 	return rc;
 }
 
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l2_control_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
+{
+	switch (cmd->hdr.command) {
+	case IPA_CMD_SETBRIDGEPORT:
+		if (cmd->data.sbp.hdr.command_code ==
+				IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
+			qeth_bridge_state_change(card, cmd);
+			return 0;
+		} else
+			return 1;
+	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+		qeth_bridge_host_event(card, cmd);
+		return 0;
+	default:
+		return 1;
+	}
+}
+
 struct qeth_discipline qeth_l2_discipline = {
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -1246,6 +1275,7 @@ struct qeth_discipline qeth_l2_discipline = {
 	.freeze = qeth_l2_pm_suspend,
 	.thaw = qeth_l2_pm_resume,
 	.restore = qeth_l2_pm_resume,
+	.control_event_handler = qeth_l2_control_event,
 };
 EXPORT_SYMBOL_GPL(qeth_l2_discipline);
 
@@ -1463,7 +1493,8 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
 	kfree(data);
 }
 
-void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
+static void qeth_bridge_state_change(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
 {
 	struct qeth_sbp_state_change *qports =
 		&cmd->data.sbp.data.state_change;
@@ -1488,7 +1519,6 @@ void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
 		sizeof(struct qeth_sbp_state_change) + extrasize);
 	queue_work(qeth_wq, &data->worker);
 }
-EXPORT_SYMBOL(qeth_bridge_state_change);
 
 struct qeth_bridge_host_data {
 	struct work_struct worker;
@@ -1528,7 +1558,8 @@ static void qeth_bridge_host_event_worker(struct work_struct *work)
 	kfree(data);
 }
 
-void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
+static void qeth_bridge_host_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
 {
 	struct qeth_ipacmd_addr_change *hostevs =
 		&cmd->data.addrchange;
@@ -1560,7 +1591,6 @@ void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd)
 		sizeof(struct qeth_ipacmd_addr_change) + extrasize);
 	queue_work(qeth_wq, &data->worker);
 }
-EXPORT_SYMBOL(qeth_bridge_host_event);
 
 /* SETBRIDGEPORT support; sending commands */
 
@@ -1683,7 +1713,7 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
  * Sets bitmask of supported setbridgeport subfunctions in the qeth_card
  * strucutre: card->options.sbp.supported_funcs.
  */
-void qeth_bridgeport_query_support(struct qeth_card *card)
+static void qeth_bridgeport_query_support(struct qeth_card *card)
 {
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;
@@ -1709,7 +1739,6 @@ void qeth_bridgeport_query_support(struct qeth_card *card)
 	}
 	card->options.sbp.supported_funcs = cbctl.data.supported;
 }
-EXPORT_SYMBOL_GPL(qeth_bridgeport_query_support);
 
 static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
 		struct qeth_reply *reply, unsigned long data)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
@@ -3593,6 +3593,13 @@ out:
 	return rc;
 }
 
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l3_control_event(struct qeth_card *card,
+					struct qeth_ipa_cmd *cmd)
+{
+	return 1;
+}
+
 struct qeth_discipline qeth_l3_discipline = {
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
@@ -3606,6 +3613,7 @@ struct qeth_discipline qeth_l3_discipline = {
 	.freeze = qeth_l3_pm_suspend,
 	.thaw = qeth_l3_pm_resume,
 	.restore = qeth_l3_pm_resume,
+	.control_event_handler = qeth_l3_control_event,
 };
 EXPORT_SYMBOL_GPL(qeth_l3_discipline);
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
@@ -2456,6 +2456,7 @@ void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 
 struct skb_checksum_ops {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
@@ -47,6 +47,8 @@
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/netdevice.h>
 #ifdef CONFIG_NET_CLS_ACT
 #include <net/pkt_sched.h>
@@ -2119,7 +2121,7 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
 /**
  *	skb_zerocopy - Zero copy skb to skb
  *	@to: destination buffer
- *	@source: source buffer
+ *	@from: source buffer
  *	@len: number of bytes to copy from source buffer
  *	@hlen: size of linear headroom in destination buffer
  *
@@ -3916,3 +3918,26 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 	nf_reset_trace(skb);
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
+
+/**
+ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_transport_seglen is used to determine the real size of the
+ * individual segments, including Layer4 headers (TCP/UDP).
+ *
+ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
+ */
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+{
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	unsigned int hdr_len;
+
+	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+		hdr_len = tcp_hdrlen(skb);
+	else
+		hdr_len = sizeof(struct udphdr);
+	return hdr_len + shinfo->gso_size;
+}
+EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
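As a worked example of the helper just added: for a TCPv4 GSO packet with a gso_size of 1448 bytes and a 32-byte TCP header (20 bytes base header plus 12 bytes of timestamp options, a common case), skb_gso_transport_seglen() returns 1448 + 32 = 1480. The sch_tbf hunks below then add the MAC/IP header length on top of this to get the on-wire size of each segment.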
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
@@ -678,7 +678,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
 			hc06_ptr += 3;
 		} else {
 			/* compress nothing */
-			memcpy(hc06_ptr, &hdr, 4);
+			memcpy(hc06_ptr, hdr, 4);
 			/* replace the top byte with new ECN | DSCP format */
 			*hc06_ptr = tmp;
 			hc06_ptr += 4;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
@@ -178,7 +178,7 @@ static int ipgre_err(struct sk_buff *skb, u32 info,
 	else
 		itn = net_generic(net, ipgre_net_id);
 
-	iph = (const struct iphdr *)skb->data;
+	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
 			     iph->daddr, iph->saddr, tpi->key);
 
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
@@ -314,7 +314,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
 	const struct iphdr *iph = ip_hdr(skb);
 	struct rtable *rt;
 
-	if (sysctl_ip_early_demux && !skb_dst(skb)) {
+	if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
 		const struct net_protocol *ipprot;
 		int protocol = iph->protocol;
 
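The skb->sk == NULL test is the TPROXY fix from the merge summary: a TPROXY rule running in the netfilter PREROUTING hook may already have attached a socket reference to the skb, and letting early demux run afterwards would overwrite that reference without releasing it, leaking the socket. Skipping early demux when a socket is already attached avoids the leak; the ip6_input.c hunk below applies the same guard on the IPv6 path.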
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
@@ -40,6 +40,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/rculist.h>
 #include <linux/err.h>
 
 #include <net/sock.h>
 #include <net/ip.h>
@@ -930,7 +931,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
 	}
 	rtnl_unlock();
 
-	return PTR_RET(itn->fb_tunnel_dev);
+	return PTR_ERR_OR_ZERO(itn->fb_tunnel_dev);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
 
|
|
@ -49,7 +49,7 @@
|
|||
|
||||
int ip6_rcv_finish(struct sk_buff *skb)
|
||||
{
|
||||
if (sysctl_ip_early_demux && !skb_dst(skb)) {
|
||||
if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
|
||||
const struct inet6_protocol *ipprot;
|
||||
|
||||
ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
|
||||
|
|
|
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
@@ -43,7 +43,7 @@ int llc_mac_hdr_init(struct sk_buff *skb,
 		rc = 0;
 		break;
 	default:
-		WARN(1, "device type not supported: %d\n", skb->dev->type);
+		break;
 	}
 	return rc;
 }
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
@@ -381,6 +381,8 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
 
 		rxrpc_assign_connection_id(conn);
 		rx->conn = conn;
+	} else {
+		spin_lock(&trans->client_lock);
 	}
 
 	/* we've got a connection with a free channel and we can now attach the
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
@@ -180,7 +180,8 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
 		if (copy > len - copied)
 			copy = len - copied;
 
-		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		if (skb->ip_summed == CHECKSUM_UNNECESSARY ||
+		    skb->ip_summed == CHECKSUM_PARTIAL) {
 			ret = skb_copy_datagram_iovec(skb, offset,
 						      msg->msg_iov, copy);
 		} else {
@@ -353,6 +354,10 @@ csum_copy_error:
 	if (continue_call)
 		rxrpc_put_call(continue_call);
 	rxrpc_kill_skb(skb);
+	if (!(flags & MSG_PEEK)) {
+		if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
+			BUG();
+	}
 	skb_kill_datagram(&rx->sk, skb, flags);
 	rxrpc_put_call(call);
 	return -EAGAIN;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
@@ -21,7 +21,6 @@
 #include <net/netlink.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
-#include <net/tcp.h>
 
 
 /* Simple Token Bucket Filter.
@@ -148,16 +147,10 @@ static u64 psched_ns_t2l(const struct psched_ratecfg *r,
  * Return length of individual segments of a gso packet,
  * including all headers (MAC, IP, TCP/UDP)
  */
-static unsigned int skb_gso_seglen(const struct sk_buff *skb)
+static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
 {
 	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
-	const struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-		hdr_len += tcp_hdrlen(skb);
-	else
-		hdr_len += sizeof(struct udphdr);
-	return hdr_len + shinfo->gso_size;
+	return hdr_len + skb_gso_transport_seglen(skb);
 }
 
 /* GSO packet is too big, segment it so that tbf can transmit
@@ -202,7 +195,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	int ret;
 
 	if (qdisc_pkt_len(skb) > q->max_size) {
-		if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
+		if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
 			return tbf_segment(skb, sch);
 		return qdisc_reshape_fail(skb, sch);
 	}