Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Two read past end of buffer fixes in AF_KEY, from Eric Biggers.

 2) Memory leak in key_notify_policy(), from Steffen Klassert.

 3) Fix overflow with bpf arrays, from Daniel Borkmann.

 4) Fix RDMA regression with mlx5 due to mlx5 no longer using
    pci_irq_get_affinity(), from Saeed Mahameed.

 5) Missing RCU read locking in nl80211_send_iface() when it calls
    ieee80211_bss_get_ie(), from Dominik Brodowski.

 6) cfg80211 should check dev_set_name()'s return value, from Johannes
    Berg.

 7) Missing module license tag in 9p protocol, from Stephen Hemminger.

 8) Fix crash due to too small MTU in udp ipv6 sendmsg, from Mike
    Maloney.

 9) Fix endless loop in netlink extack code, from David Ahern.

10) TLS socket layer sets inverted error codes, resulting in an endless
    loop. From Robert Hering. (A short sketch of the sign bug follows
    this list.)

11) Revert openvswitch erspan tunnel support, it's mis-designed and we
    need to kill it before it goes into a real release. From William Tu.

12) Fix lan78xx failures in full speed USB mode, from Yuiko Oshino.
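
A short sketch of the sign bug in item 10: sk->sk_err stores a positive
errno value, while kernel socket calls report failure as a negative
errno, so returning the stored value unchanged looked like forward
progress to callers, which then retried forever. The sketch below is
compilable userspace C; send_record() is an illustrative stand-in, not
the actual net/tls function.

#include <errno.h>
#include <stdio.h>

/* sk_err mimics sock->sk_err: failures are stored as positive errno. */
static int send_record(int sk_err)
{
    if (sk_err)
        return -sk_err; /* the fix; the bug returned +sk_err */
    return 0;           /* success */
}

int main(void)
{
    /* Callers loop while the result is positive, so the old positive
     * return never terminated; the negated errno breaks the loop. */
    printf("%d\n", send_record(EBADMSG)); /* prints -74 on Linux */
    return 0;
}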

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (54 commits)
  net, sched: fix panic when updating miniq {b,q}stats
  qed: Fix potential use-after-free in qed_spq_post()
  nfp: use the correct index for link speed table
  lan78xx: Fix failure in USB Full Speed
  sctp: do not allow the v4 socket to bind a v4mapped v6 address
  sctp: return error if the asoc has been peeled off in sctp_wait_for_sndbuf
  sctp: reinit stream if stream outcnt has been change by sinit in sendmsg
  ibmvnic: Fix pending MAC address changes
  netlink: extack: avoid parenthesized string constant warning
  ipv4: Make neigh lookup keys for loopback/point-to-point devices be INADDR_ANY
  net: Allow neigh contructor functions ability to modify the primary_key
  sh_eth: fix dumping ARSTR
  Revert "openvswitch: Add erspan tunnel support."
  net/tls: Fix inverted error codes to avoid endless loop
  ipv6: ip6_make_skb() needs to clear cork.base.dst
  sctp: avoid compiler warning on implicit fallthru
  net: ipv4: Make "ip route get" match iif lo rules again.
  netlink: extack needs to be reset each time through loop
  tipc: fix a memory leak in tipc_nl_node_get_link()
  ipv6: fix udpv6 sendmsg crash caused by too small MTU
  ...
Linus Torvalds 2018-01-16 12:45:30 -08:00
Parents: 41aa5e5d71 81d947e2b8
Commit: b45a53be53
59 changed files with 390 additions and 239 deletions

@ -1324,7 +1324,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
return err;
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
!MLX5_CAP_GEN(dev->mdev, disable_local_lb))
(!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
!MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return err;
mutex_lock(&dev->lb_mutex);
@ -1342,7 +1343,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
!MLX5_CAP_GEN(dev->mdev, disable_local_lb))
(!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
!MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return;
mutex_lock(&dev->lb_mutex);
@ -4158,7 +4160,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
goto err_cnt;
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
if (!dev->mdev->priv.uar)
if (IS_ERR(dev->mdev->priv.uar))
goto err_cong;
err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
@ -4187,7 +4189,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
}
if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
MLX5_CAP_GEN(mdev, disable_local_lb))
(MLX5_CAP_GEN(mdev, disable_local_lb_uc) ||
MLX5_CAP_GEN(mdev, disable_local_lb_mc)))
mutex_init(&dev->lb_mutex);
dev->ib_active = true;


@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = {
module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
#endif /* CONFIG_CS89x0_PLATFORM */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");


@ -756,6 +756,12 @@ static int ibmvnic_login(struct net_device *netdev)
}
} while (adapter->renegotiate);
/* handle pending MAC address changes after successful login */
if (adapter->mac_change_pending) {
__ibmvnic_set_mac(netdev, &adapter->desired.mac);
adapter->mac_change_pending = false;
}
return 0;
}
@ -993,11 +999,6 @@ static int ibmvnic_open(struct net_device *netdev)
mutex_lock(&adapter->reset_lock);
if (adapter->mac_change_pending) {
__ibmvnic_set_mac(netdev, &adapter->desired.mac);
adapter->mac_change_pending = false;
}
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc) {
@ -1527,7 +1528,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
if (adapter->state != VNIC_OPEN) {
if (adapter->state == VNIC_PROBED) {
memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
adapter->mac_change_pending = true;
return 0;


@ -895,7 +895,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_timestamp_set(struct mlx5e_priv *priv);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
bool is_rss;


@ -922,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
int i;
struct ieee_ets ets;
int err;
int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return;
@ -936,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
ets.prio_tc[i] = i;
}
if (ets.ets_cap > 1) {
/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
ets.prio_tc[0] = 1;
ets.prio_tc[1] = 0;
}
mlx5e_dcbnl_ieee_setets_core(priv, &ets);
err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
if (err)
netdev_err(priv->netdev,
"%s, Failed to init ETS: %d\n", __func__, err);
}
enum {


@ -207,7 +207,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
return;
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_update_stats(priv, true);
mutex_unlock(&priv->state_lock);


@ -2669,7 +2669,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
netif_carrier_on(netdev);
}
void mlx5e_timestamp_set(struct mlx5e_priv *priv)
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
@ -2690,7 +2690,6 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_activate_priv_channels(priv);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
mlx5e_timestamp_set(priv);
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@ -3219,12 +3218,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return 0;
}
#define MLX5E_SET_FEATURE(netdev, feature, enable) \
#define MLX5E_SET_FEATURE(features, feature, enable) \
do { \
if (enable) \
netdev->features |= feature; \
*features |= feature; \
else \
netdev->features &= ~feature; \
*features &= ~feature; \
} while (0)
typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
@ -3347,6 +3346,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
#endif
static int mlx5e_handle_feature(struct net_device *netdev,
netdev_features_t *features,
netdev_features_t wanted_features,
netdev_features_t feature,
mlx5e_feature_handler feature_handler)
@ -3365,34 +3365,40 @@ static int mlx5e_handle_feature(struct net_device *netdev,
return err;
}
MLX5E_SET_FEATURE(netdev, feature, enable);
MLX5E_SET_FEATURE(features, feature, enable);
return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t oper_features = netdev->features;
int err;
err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
set_feature_lro);
err |= mlx5e_handle_feature(netdev, features,
err = mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_LRO, set_feature_lro);
err |= mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
set_feature_tc_num_filters);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
set_feature_rx_all);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
set_feature_rx_fcs);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
set_feature_rx_vlan);
err |= mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_HW_TC, set_feature_tc_num_filters);
err |= mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_RXALL, set_feature_rx_all);
err |= mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_RXFCS, set_feature_rx_fcs);
err |= mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
set_feature_arfs);
err |= mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_NTUPLE, set_feature_arfs);
#endif
return err ? -EINVAL : 0;
if (err) {
netdev->features = oper_features;
return -EINVAL;
}
return 0;
}
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
@ -4139,6 +4145,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
mlx5e_timestamp_init(priv);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)


@ -877,6 +877,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
mlx5e_timestamp_init(priv);
}
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)


@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
int err = 0;
/* Temporarily enable local_lb */
if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
if (!lbtp->local_lb)
mlx5_nic_vport_update_local_lb(priv->mdev, true);
err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
if (err)
return err;
if (!lbtp->local_lb) {
err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
if (err)
return err;
}
err = mlx5e_refresh_tirs(priv, true);
if (err)
return err;
goto out;
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;
dev_add_pack(&lbtp->pt);
return 0;
out:
if (!lbtp->local_lb)
mlx5_nic_vport_update_local_lb(priv->mdev, false);
return err;
}
static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
struct mlx5e_lbt_priv *lbtp)
{
if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
if (!lbtp->local_lb)
mlx5_nic_vport_update_local_lb(priv->mdev, false);
}
dev_remove_pack(&lbtp->pt);
mlx5e_refresh_tirs(priv, false);


@ -86,6 +86,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
/* netdev init */
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
@ -450,7 +452,6 @@ static int mlx5i_open(struct net_device *netdev)
mlx5e_refresh_tirs(epriv, false);
mlx5e_activate_priv_channels(epriv);
mlx5e_timestamp_set(epriv);
mutex_unlock(&epriv->state_lock);
return 0;


@ -423,9 +423,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
ptp_event.pps_times.ts_real =
ns_to_timespec64(ptp_event.timestamp);
} else {
ptp_event.type = PTP_CLOCK_EXTTS;
}


@ -319,6 +319,7 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = &priv->eq_table;
int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
int err;
nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
MLX5_EQ_VEC_COMP_BASE;
@ -328,21 +329,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
if (!priv->irq_info)
goto err_free_msix;
return -ENOMEM;
nvec = pci_alloc_irq_vectors(dev->pdev,
MLX5_EQ_VEC_COMP_BASE + 1, nvec,
PCI_IRQ_MSIX);
if (nvec < 0)
return nvec;
if (nvec < 0) {
err = nvec;
goto err_free_irq_info;
}
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
err_free_msix:
err_free_irq_info:
kfree(priv->irq_info);
return -ENOMEM;
return err;
}
static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
@ -578,8 +581,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
int ret = 0;
/* Disable local_lb by default */
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
MLX5_CAP_GEN(dev, disable_local_lb))
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
ret = mlx5_nic_vport_update_local_lb(dev, false);
return ret;
@ -1121,10 +1123,13 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_stop_poll;
}
if (boot && mlx5_init_once(dev, priv)) {
if (boot) {
err = mlx5_init_once(dev, priv);
if (err) {
dev_err(&pdev->dev, "sw objs init failed\n");
goto err_stop_poll;
}
}
err = mlx5_alloc_irq_vectors(dev);
if (err) {
@ -1133,8 +1138,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
dev->priv.uar = mlx5_get_uars_page(dev);
if (!dev->priv.uar) {
if (IS_ERR(dev->priv.uar)) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
err = PTR_ERR(dev->priv.uar);
goto err_disable_msix;
}


@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
struct mlx5_uars_page *ret;
mutex_lock(&mdev->priv.bfregs.reg_head.lock);
if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
ret = alloc_uars_page(mdev, false);
if (IS_ERR(ret)) {
ret = NULL;
goto out;
}
list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
} else {
if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
struct mlx5_uars_page, list);
kref_get(&ret->ref_count);
goto out;
}
ret = alloc_uars_page(mdev, false);
if (IS_ERR(ret))
goto out;
list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
mutex_unlock(&mdev->priv.bfregs.reg_head.lock);


@ -908,23 +908,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
void *in;
int err;
mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable");
if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
!MLX5_CAP_GEN(mdev, disable_local_lb_uc))
return 0;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_nic_vport_context_in, in,
field_select.disable_mc_local_lb, 1);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_mc_local_lb, !enable);
MLX5_SET(modify_nic_vport_context_in, in,
field_select.disable_uc_local_lb, 1);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_uc_local_lb, !enable);
if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
MLX5_SET(modify_nic_vport_context_in, in,
field_select.disable_mc_local_lb, 1);
if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
MLX5_SET(modify_nic_vport_context_in, in,
field_select.disable_uc_local_lb, 1);
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
if (!err)
mlx5_core_dbg(mdev, "%s local_lb\n",
enable ? "enable" : "disable");
kvfree(in);
return err;
}


@ -333,7 +333,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
ls >= ARRAY_SIZE(ls_to_ethtool))
return 0;
cmd->base.speed = ls_to_ethtool[sts];
cmd->base.speed = ls_to_ethtool[ls];
cmd->base.duplex = DUPLEX_FULL;
return 0;


@ -776,6 +776,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
bool b_ret_ent = true;
bool eblock;
if (!p_hwfn)
return -EINVAL;
@ -794,6 +795,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
if (rc)
goto spq_post_fail;
/* Check if entry is in block mode before qed_spq_add_entry,
* which might kfree p_ent.
*/
eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
/* Add the request to the pending queue */
rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
if (rc)
@ -811,7 +817,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_spq->lock);
if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
if (eblock) {
/* For entries in QED BLOCK mode, the completion code cannot
* perform the necessary cleanup - if it did, we couldn't
* access p_ent here to see whether it's successful or not.

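The hunk above illustrates a general rule: cache every field you still
need from an object before calling a function that may take ownership
of it and free it. A freestanding sketch of the pattern, with
illustrative names rather than the qed API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { bool eblock; };

/* Takes ownership and may free the entry at once, like
 * qed_spq_add_entry() in the hunk above. */
static int queue_entry(struct entry *e)
{
    free(e);
    return 0;
}

static int post(struct entry *e)
{
    bool eblock = e->eblock; /* read before the handoff */
    int rc = queue_entry(e);

    if (rc)
        return rc;
    if (eblock) /* touching e->eblock here would be a use-after-free */
        puts("block until completion");
    return 0;
}

int main(void)
{
    struct entry *e = malloc(sizeof(*e));

    if (!e)
        return 1;
    e->eblock = true;
    return post(e);
}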

@ -2089,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(CSMR);
if (cd->select_mii)
add_reg(RMII_MII);
add_reg(ARSTR);
if (cd->tsu) {
add_tsu_reg(ARSTR);
add_tsu_reg(TSU_CTRST);
add_tsu_reg(TSU_FWEN0);
add_tsu_reg(TSU_FWEN1);


@ -1006,17 +1006,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
if (!ifname_is_set)
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
mutex_unlock(&pn->all_ppp_mutex);
ret = register_netdevice(ppp->dev);
if (ret < 0)
goto err_unit;
atomic_inc(&ppp_unit_count);
mutex_unlock(&pn->all_ppp_mutex);
return 0;
err_unit:
mutex_lock(&pn->all_ppp_mutex);
unit_put(&pn->units_idr, ppp->file.index);
err:
mutex_unlock(&pn->all_ppp_mutex);


@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
dev->rx_qlen = 4;
dev->tx_qlen = 4;
}
ret = lan78xx_write_reg(dev, BURST_CAP, buf);


@ -489,6 +489,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
static spinlock_t hwsim_radio_lock;
static LIST_HEAD(hwsim_radios);
static struct workqueue_struct *hwsim_wq;
static int hwsim_radio_idx;
static struct platform_driver mac80211_hwsim_driver = {
@ -3120,6 +3121,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_CHANNELS])
param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
GENL_SET_ERR_MSG(info, "too many channels specified");
return -EINVAL;
}
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
@ -3342,7 +3348,7 @@ static void remove_user_radios(u32 portid)
if (entry->destroy_on_close && entry->portid == portid) {
list_del(&entry->list);
INIT_WORK(&entry->destroy_work, destroy_radio);
schedule_work(&entry->destroy_work);
queue_work(hwsim_wq, &entry->destroy_work);
}
}
spin_unlock_bh(&hwsim_radio_lock);
@ -3417,7 +3423,7 @@ static void __net_exit hwsim_exit_net(struct net *net)
list_del(&data->list);
INIT_WORK(&data->destroy_work, destroy_radio);
schedule_work(&data->destroy_work);
queue_work(hwsim_wq, &data->destroy_work);
}
spin_unlock_bh(&hwsim_radio_lock);
}
@ -3449,6 +3455,10 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
if (!hwsim_wq)
return -ENOMEM;
err = register_pernet_device(&hwsim_net_ops);
if (err)
return err;
@ -3587,8 +3597,11 @@ static void __exit exit_mac80211_hwsim(void)
hwsim_exit_netlink();
mac80211_hwsim_free();
flush_workqueue(hwsim_wq);
unregister_netdev(hwsim_mon);
platform_driver_unregister(&mac80211_hwsim_driver);
unregister_pernet_device(&hwsim_net_ops);
destroy_workqueue(hwsim_wq);
}
module_exit(exit_mac80211_hwsim);


@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
@ -1231,7 +1232,23 @@ enum {
static inline const struct cpumask *
mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
{
return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
const struct cpumask *mask;
struct irq_desc *desc;
unsigned int irq;
int eqn;
int err;
err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
if (err)
return NULL;
desc = irq_to_desc(irq);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
#else
mask = desc->irq_common_data.affinity;
#endif
return mask;
}
#endif /* MLX5_DRIVER_H */


@ -1027,8 +1027,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
u8 disable_local_lb[0x1];
u8 reserved_at_3e2[0x9];
u8 disable_local_lb_uc[0x1];
u8 disable_local_lb_mc[0x1];
u8 reserved_at_3e3[0x8];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];


@ -85,7 +85,7 @@ struct netlink_ext_ack {
* to the lack of an output buffer.)
*/
#define NL_SET_ERR_MSG(extack, msg) do { \
static const char __msg[] = (msg); \
static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
if (__extack) \
@ -101,7 +101,7 @@ struct netlink_ext_ack {
} while (0)
#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \
static const char __msg[] = (msg); \
static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
if (__extack) { \

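The parentheses dropped above matter because the macro argument
initializes a static char array; when msg is a string literal, (msg)
becomes a parenthesized string constant, which GCC flags under
-Wpedantic. A two-line sketch of the difference (compile with
gcc -Wpedantic -c):

static const char warns[] = ("message"); /* warning: array initialized
                                          * from parenthesized string
                                          * constant */
static const char clean[] = "message";   /* accepted silently */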

@ -174,6 +174,15 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
* if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
* If ring is never resized, and if the pointer is merely
* tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
* However, if called outside the lock, and if some other CPU
* consumes ring entries at the same time, the value returned
* is not guaranteed to be correct.
* In this case - to avoid incorrectly detecting the ring
* as empty - the CPU consuming the ring entries is responsible
* for either consuming all ring entries until the ring is empty,
* or synchronizing with some other CPU and causing it to
* execute __ptr_ring_peek and/or consume the ring entries
* after the synchronization point.
*/
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
@ -182,10 +191,7 @@ static inline void *__ptr_ring_peek(struct ptr_ring *r)
return NULL;
}
/* Note: callers invoking this in a loop must use a compiler barrier,
* for example cpu_relax(). Callers must take consumer_lock
* if the ring is ever resized - see e.g. ptr_ring_empty.
*/
/* See __ptr_ring_peek above for locking rules. */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
return !__ptr_ring_peek(r);


@ -20,6 +20,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
key = INADDR_ANY;
return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}


@ -815,6 +815,8 @@ struct cfg80211_csa_settings {
u8 count;
};
#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
/**
* struct iface_combination_params - input parameters for interface combinations
*


@ -179,6 +179,7 @@ struct Qdisc_ops {
const struct Qdisc_class_ops *cl_ops;
char id[IFNAMSIZ];
int priv_size;
unsigned int static_flags;
int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
@ -444,6 +445,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,


@ -170,7 +170,7 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
static inline void tls_err_abort(struct sock *sk)
{
sk->sk_err = -EBADMSG;
sk->sk_err = EBADMSG;
sk->sk_error_report(sk);
}


@ -363,7 +363,6 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */
OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
OVS_TUNNEL_KEY_ATTR_PAD,
OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* be32 ERSPAN index. */
__OVS_TUNNEL_KEY_ATTR_MAX
};


@ -56,7 +56,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
u32 elem_size, index_mask, max_entries;
bool unpriv = !capable(CAP_SYS_ADMIN);
struct bpf_array *array;
u64 array_size;
u64 array_size, mask64;
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
@ -74,13 +74,25 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
elem_size = round_up(attr->value_size, 8);
max_entries = attr->max_entries;
index_mask = roundup_pow_of_two(max_entries) - 1;
if (unpriv)
/* On 32 bit archs roundup_pow_of_two() with max_entries that has
* upper most bit set in u32 space is undefined behavior due to
* resulting 1U << 32, so do it manually here in u64 space.
*/
mask64 = fls_long(max_entries - 1);
mask64 = 1ULL << mask64;
mask64 -= 1;
index_mask = mask64;
if (unpriv) {
/* round up array size to nearest power of 2,
* since cpu will speculate within index_mask limits
*/
max_entries = index_mask + 1;
/* Check for overflows. */
if (max_entries < attr->max_entries)
return ERR_PTR(-E2BIG);
}
array_size = sizeof(*array);
if (percpu)

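The index_mask derivation above can be exercised on its own. Below is a
compilable userspace sketch of the same computation; index_mask() is a
stand-in for the in-kernel code, with __builtin_clzll() playing the
role of fls_long(). The point of the u64 detour: with the top bit of
max_entries set, a u32 roundup_pow_of_two() would evaluate 1U << 32,
which is undefined behavior.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the mask64/fls_long() computation in array_map_alloc();
 * max_entries >= 1 is guaranteed by the earlier sanity check. */
static uint32_t index_mask(uint32_t max_entries)
{
    uint64_t v = (uint64_t)max_entries - 1;
    unsigned int fls = v ? 64 - __builtin_clzll(v) : 0;

    return (uint32_t)((1ULL << fls) - 1);
}

int main(void)
{
    /* Top bit set: the old u32 path would have shifted 1U by 32. */
    printf("0x%x\n", index_mask(0x80000001u)); /* 0xffffffff */
    printf("0x%x\n", index_mask(8));           /* 0x7 */
    return 0;
}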

@ -2493,6 +2493,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
return -EINVAL;
}
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
@ -4472,7 +4477,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
*/
map_ptr = env->insn_aux_data[i + delta].map_ptr;
if (map_ptr == BPF_MAP_PTR_POISON) {
verbose(env, "tail_call obusing map_ptr\n");
verbose(env, "tail_call abusing map_ptr\n");
return -EINVAL;
}
if (!map_ptr->unpriv_array)


@ -543,3 +543,7 @@ static void p9_trans_xen_exit(void)
return xenbus_unregister_driver(&xen_9pfs_front_driver);
}
module_exit(p9_trans_xen_exit);
MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>");
MODULE_DESCRIPTION("Xen Transport for 9P");
MODULE_LICENSE("GPL");


@ -532,7 +532,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
if (n->parms->dead) {
rc = ERR_PTR(-EINVAL);
@ -544,7 +544,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
n1 != NULL;
n1 = rcu_dereference_protected(n1->next,
lockdep_is_held(&tbl->lock))) {
if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
if (want_ref)
neigh_hold(n1);
rc = n1;


@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
static int arp_constructor(struct neighbour *neigh)
{
__be32 addr = *(__be32 *)neigh->primary_key;
__be32 addr;
struct net_device *dev = neigh->dev;
struct in_device *in_dev;
struct neigh_parms *parms;
u32 inaddr_any = INADDR_ANY;
if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
addr = *(__be32 *)neigh->primary_key;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (!in_dev) {


@ -981,6 +981,7 @@ static int esp_init_state(struct xfrm_state *x)
switch (encap->encap_type) {
default:
err = -EINVAL;
goto error;
case UDP_ENCAP_ESPINUDP:
x->props.header_len += sizeof(struct udphdr);


@ -38,7 +38,8 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
__be32 spi;
int err;
skb_pull(skb, offset);
if (!pskb_pull(skb, offset))
return NULL;
if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
goto out;


@ -2762,6 +2762,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
if (err == 0 && rt->dst.error)
err = -rt->dst.error;
} else {
fl4.flowi4_iif = LOOPBACK_IFINDEX;
rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
err = 0;
if (IS_ERR(rt))


@ -890,13 +890,12 @@ static int esp6_init_state(struct xfrm_state *x)
x->props.header_len += IPV4_BEET_PHMAXLEN +
(sizeof(struct ipv6hdr) - sizeof(struct iphdr));
break;
default:
case XFRM_MODE_TRANSPORT:
break;
case XFRM_MODE_TUNNEL:
x->props.header_len += sizeof(struct ipv6hdr);
break;
default:
goto error;
}
align = ALIGN(crypto_aead_blocksize(aead), 4);


@ -60,7 +60,8 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
int nhoff;
int err;
skb_pull(skb, offset);
if (!pskb_pull(skb, offset))
return NULL;
if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
goto out;


@ -1206,14 +1206,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
v6_cork->tclass = ipc6->tclass;
if (rt->dst.flags & DST_XFRM_TUNNEL)
mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
rt->dst.dev->mtu : dst_mtu(&rt->dst);
READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
else
mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
rt->dst.dev->mtu : dst_mtu(rt->dst.path);
READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
}
if (mtu < IPV6_MIN_MTU)
return -EINVAL;
cork->base.fragsize = mtu;
if (dst_allfrag(rt->dst.path))
cork->base.flags |= IPCORK_ALLFRAG;
@ -1733,6 +1735,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
cork.base.flags = 0;
cork.base.addr = 0;
cork.base.opt = NULL;
cork.base.dst = NULL;
v6_cork.opt = NULL;
err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
if (err) {


@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
#endif
int len;
if (sp->sadb_address_len <
DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
sizeof(uint64_t)))
return -EINVAL;
switch (addr->sa_family) {
case AF_INET:
len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
uint16_t ext_type;
int ext_len;
if (len < sizeof(*ehdr))
return -EINVAL;
ext_len = ehdr->sadb_ext_len;
ext_len *= sizeof(uint64_t);
ext_type = ehdr->sadb_ext_type;
@ -2194,8 +2202,10 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
return PTR_ERR(out_skb);
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
if (err < 0)
if (err < 0) {
kfree_skb(out_skb);
return err;
}
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = PF_KEY_V2;


@ -2384,7 +2384,7 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
struct nlmsghdr *,
struct netlink_ext_ack *))
{
struct netlink_ext_ack extack = {};
struct netlink_ext_ack extack;
struct nlmsghdr *nlh;
int err;
@ -2405,6 +2405,7 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
goto ack;
memset(&extack, 0, sizeof(extack));
err = cb(skb, nlh, &extack);
if (err == -EINTR)
goto skip;


@ -49,7 +49,6 @@
#include <net/mpls.h>
#include <net/vxlan.h>
#include <net/tun_proto.h>
#include <net/erspan.h>
#include "flow_netlink.h"
@ -334,8 +333,7 @@ size_t ovs_tun_key_attr_size(void)
* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
*/
+ nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
+ nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_DST */
+ nla_total_size(4); /* OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS */
+ nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */
}
static size_t ovs_nsh_key_attr_size(void)
@ -402,7 +400,6 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1]
.next = ovs_vxlan_ext_key_lens },
[OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
[OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) },
[OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = sizeof(u32) },
};
static const struct ovs_len_tbl
@ -634,33 +631,6 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
return 0;
}
static int erspan_tun_opt_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool log)
{
unsigned long opt_key_offset;
struct erspan_metadata opts;
BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
memset(&opts, 0, sizeof(opts));
opts.index = nla_get_be32(attr);
/* Index has only 20-bit */
if (ntohl(opts.index) & ~INDEX_MASK) {
OVS_NLERR(log, "ERSPAN index number %x too large.",
ntohl(opts.index));
return -EINVAL;
}
SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), is_mask);
opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
is_mask);
return 0;
}
static int ip_tun_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool log)
@ -768,19 +738,6 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
break;
case OVS_TUNNEL_KEY_ATTR_PAD:
break;
case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
if (opts_type) {
OVS_NLERR(log, "Multiple metadata blocks provided");
return -EINVAL;
}
err = erspan_tun_opt_from_nlattr(a, match, is_mask, log);
if (err)
return err;
tun_flags |= TUNNEL_ERSPAN_OPT;
opts_type = type;
break;
default:
OVS_NLERR(log, "Unknown IP tunnel attribute %d",
type);
@ -905,10 +862,6 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
return -EMSGSIZE;
else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
((struct erspan_metadata *)tun_opts)->index))
return -EMSGSIZE;
}
return 0;
@ -2533,8 +2486,6 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
break;
case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
break;
case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
break;
}
};


@ -1063,17 +1063,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
}
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
if (qdisc_is_percpu_stats(sch)) {
sch->cpu_bstats =
netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
if (!sch->cpu_bstats)
goto err_out4;
sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
if (!sch->cpu_qstats)
goto err_out4;
}
if (tca[TCA_STAB]) {
stab = qdisc_get_stab(tca[TCA_STAB]);
if (IS_ERR(stab)) {
@ -1115,7 +1104,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
ops->destroy(sch);
err_out3:
dev_put(dev);
kfree((char *) sch - sch->padded);
qdisc_free(sch);
err_out2:
module_put(ops->owner);
err_out:
@ -1123,8 +1112,6 @@ err_out:
return NULL;
err_out4:
free_percpu(sch->cpu_bstats);
free_percpu(sch->cpu_qstats);
/*
* Any broken qdiscs that would require a ops->reset() here?
* The qdisc was never in action so it shouldn't be necessary.


@ -633,6 +633,19 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
qdisc_skb_head_init(&sch->q);
spin_lock_init(&sch->q.lock);
if (ops->static_flags & TCQ_F_CPUSTATS) {
sch->cpu_bstats =
netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
if (!sch->cpu_bstats)
goto errout1;
sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
if (!sch->cpu_qstats) {
free_percpu(sch->cpu_bstats);
goto errout1;
}
}
spin_lock_init(&sch->busylock);
lockdep_set_class(&sch->busylock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
@ -642,6 +655,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
dev->qdisc_running_key ?: &qdisc_running_key);
sch->ops = ops;
sch->flags = ops->static_flags;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
@ -649,6 +663,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
refcount_set(&sch->refcnt, 1);
return sch;
errout1:
kfree(p);
errout:
return ERR_PTR(err);
}
@ -698,7 +714,7 @@ void qdisc_reset(struct Qdisc *qdisc)
}
EXPORT_SYMBOL(qdisc_reset);
static void qdisc_free(struct Qdisc *qdisc)
void qdisc_free(struct Qdisc *qdisc)
{
if (qdisc_is_percpu_stats(qdisc)) {
free_percpu(qdisc->cpu_bstats);


@ -66,7 +66,6 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
{
struct ingress_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
int err;
net_inc_ingress_queue();
@ -76,13 +75,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
q->block_info.chain_head_change = clsact_chain_head_change;
q->block_info.chain_head_change_priv = &q->miniqp;
err = tcf_block_get_ext(&q->block, sch, &q->block_info);
if (err)
return err;
sch->flags |= TCQ_F_CPUSTATS;
return 0;
return tcf_block_get_ext(&q->block, sch, &q->block_info);
}
static void ingress_destroy(struct Qdisc *sch)
@ -121,6 +114,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.cl_ops = &ingress_class_ops,
.id = "ingress",
.priv_size = sizeof(struct ingress_sched_data),
.static_flags = TCQ_F_CPUSTATS,
.init = ingress_init,
.destroy = ingress_destroy,
.dump = ingress_dump,
@ -192,13 +186,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
q->egress_block_info.chain_head_change = clsact_chain_head_change;
q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
if (err)
return err;
sch->flags |= TCQ_F_CPUSTATS;
return 0;
return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
}
static void clsact_destroy(struct Qdisc *sch)
@ -225,6 +213,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
.cl_ops = &clsact_class_ops,
.id = "clsact",
.priv_size = sizeof(struct clsact_sched_data),
.static_flags = TCQ_F_CPUSTATS,
.init = clsact_init,
.destroy = clsact_destroy,
.dump = ingress_dump,


@ -826,6 +826,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp)
case AF_INET:
if (!__ipv6_only_sock(sctp_opt2sk(sp)))
return 1;
/* fallthru */
default:
return 0;
}


@ -918,9 +918,9 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
break;
case SCTP_CID_ABORT:
if (sctp_test_T_bit(chunk)) {
if (sctp_test_T_bit(chunk))
packet->vtag = asoc->c.my_vtag;
}
/* fallthru */
/* The following chunks are "response" chunks, i.e.
* they are generated in response to something we
* received. If we are sending these, then we can


@ -85,7 +85,7 @@
static int sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len, struct sock **orig_sk);
size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
@ -335,16 +335,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
if (len < sizeof (struct sockaddr))
return NULL;
/* V4 mapped address are really of AF_INET family */
if (addr->sa.sa_family == AF_INET6 &&
ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
if (!opt->pf->af_supported(AF_INET, opt))
return NULL;
} else {
/* Does this PF support this AF? */
if (!opt->pf->af_supported(addr->sa.sa_family, opt))
return NULL;
}
/* V4 mapped address are really of AF_INET family */
if (addr->sa.sa_family == AF_INET6 &&
ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
!opt->pf->af_supported(AF_INET, opt))
return NULL;
/* If we get this far, af is valid. */
af = sctp_get_af_specific(addr->sa.sa_family);
@ -1883,8 +1881,14 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
*/
if (sinit) {
if (sinit->sinit_num_ostreams) {
asoc->c.sinit_num_ostreams =
sinit->sinit_num_ostreams;
__u16 outcnt = sinit->sinit_num_ostreams;
asoc->c.sinit_num_ostreams = outcnt;
/* outcnt has been changed, so re-init stream */
err = sctp_stream_init(&asoc->stream, outcnt, 0,
GFP_KERNEL);
if (err)
goto out_free;
}
if (sinit->sinit_max_instreams) {
asoc->c.sinit_max_instreams =
@ -1971,7 +1975,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
if (!sctp_wspace(asoc)) {
/* sk can be changed by peel off when waiting for buf. */
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
if (err) {
if (err == -ESRCH) {
/* asoc is already dead. */
@ -8016,12 +8020,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len, struct sock **orig_sk)
size_t msg_len)
{
struct sock *sk = asoc->base.sk;
int err = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT(wait);
int err = 0;
pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
*timeo_p, msg_len);
@ -8050,17 +8054,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
lock_sock(sk);
if (sk != asoc->base.sk) {
release_sock(sk);
sk = asoc->base.sk;
lock_sock(sk);
}
if (sk != asoc->base.sk)
goto do_error;
*timeo_p = current_timeo;
}
out:
*orig_sk = sk;
finish_wait(&asoc->wait, &wait);
/* Release the association's refcnt. */


@ -1880,36 +1880,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
if (strcmp(name, tipc_bclink_name) == 0) {
err = tipc_nl_add_bc_link(net, &msg);
if (err) {
nlmsg_free(msg.skb);
return err;
}
if (err)
goto err_free;
} else {
int bearer_id;
struct tipc_node *node;
struct tipc_link *link;
node = tipc_node_find_by_name(net, name, &bearer_id);
if (!node)
return -EINVAL;
if (!node) {
err = -EINVAL;
goto err_free;
}
tipc_node_read_lock(node);
link = node->links[bearer_id].link;
if (!link) {
tipc_node_read_unlock(node);
nlmsg_free(msg.skb);
return -EINVAL;
err = -EINVAL;
goto err_free;
}
err = __tipc_nl_add_link(net, &msg, link, 0);
tipc_node_read_unlock(node);
if (err) {
nlmsg_free(msg.skb);
return err;
}
if (err)
goto err_free;
}
return genlmsg_reply(msg.skb, info);
err_free:
nlmsg_free(msg.skb);
return err;
}
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)


@ -391,7 +391,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
while (msg_data_left(msg)) {
if (sk->sk_err) {
ret = sk->sk_err;
ret = -sk->sk_err;
goto send_end;
}
@ -544,7 +544,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
size_t copy, required_size;
if (sk->sk_err) {
ret = sk->sk_err;
ret = -sk->sk_err;
goto sendpage_end;
}


@ -439,6 +439,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
if (rv)
goto use_default_name;
} else {
int rv;
use_default_name:
/* NOTE: This is *probably* safe w/out holding rtnl because of
* the restrictions on phy names. Probably this call could
@ -446,7 +448,11 @@ use_default_name:
* phyX. But, might should add some locking and check return
* value, and use a different name if this one exists?
*/
dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
if (rv < 0) {
kfree(rdev);
return NULL;
}
}
INIT_LIST_HEAD(&rdev->wiphy.wdev_list);


@ -507,8 +507,6 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
#else


@ -2618,12 +2618,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
const u8 *ssid_ie;
if (!wdev->current_bss)
break;
rcu_read_lock();
ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
WLAN_EID_SSID);
if (!ssid_ie)
break;
if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
goto nla_put_failure_locked;
if (ssid_ie &&
nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
goto nla_put_failure_rcu_locked;
rcu_read_unlock();
break;
}
default:
@ -2635,6 +2636,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
genlmsg_end(msg, hdr);
return 0;
nla_put_failure_rcu_locked:
rcu_read_unlock();
nla_put_failure_locked:
wdev_unlock(wdev);
nla_put_failure:


@ -1769,8 +1769,7 @@ static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx,
if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS)
return;
chan_before.center_freq = chan->center_freq;
chan_before.flags = chan->flags;
chan_before = *chan;
if (chan->flags & IEEE80211_CHAN_NO_IR) {
chan->flags &= ~IEEE80211_CHAN_NO_IR;


@ -518,7 +518,7 @@ int xfrm_trans_queue(struct sk_buff *skb,
return -ENOBUFS;
XFRM_TRANS_SKB_CB(skb)->finish = finish;
skb_queue_tail(&trans->queue, skb);
__skb_queue_tail(&trans->queue, skb);
tasklet_schedule(&trans->tasklet);
return 0;
}


@ -609,7 +609,8 @@ static void xfrm_hash_rebuild(struct work_struct *work)
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
if (policy->walk.dead ||
xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
/* skip socket policies */
continue;
}
@ -974,8 +975,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
}
if (!cnt)
err = -ESRCH;
else
xfrm_policy_cache_flush();
out:
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return err;
@ -1743,6 +1742,8 @@ void xfrm_policy_cache_flush(void)
bool found = 0;
int cpu;
might_sleep();
local_bh_disable();
rcu_read_lock();
for_each_possible_cpu(cpu) {
@ -2062,8 +2063,11 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
if (num_xfrms <= 0)
goto make_dummy_bundle;
local_bh_disable();
xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
xflo->dst_orig);
local_bh_enable();
if (IS_ERR(xdst)) {
err = PTR_ERR(xdst);
if (err != -EAGAIN)
@ -2150,9 +2154,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
goto no_transform;
}
local_bh_disable();
xdst = xfrm_resolve_and_create_bundle(
pols, num_pols, fl,
family, dst_orig);
local_bh_enable();
if (IS_ERR(xdst)) {
xfrm_pols_put(pols, num_pols);
err = PTR_ERR(xdst);


@ -313,13 +313,14 @@ retry:
if ((type && !try_module_get(type->owner)))
type = NULL;
rcu_read_unlock();
if (!type && try_load) {
request_module("xfrm-offload-%d-%d", family, proto);
try_load = 0;
goto retry;
}
rcu_read_unlock();
return type;
}
@ -1534,8 +1535,12 @@ out:
err = -EINVAL;
spin_lock_bh(&x1->lock);
if (likely(x1->km.state == XFRM_STATE_VALID)) {
if (x->encap && x1->encap)
if (x->encap && x1->encap &&
x->encap->encap_type == x1->encap->encap_type)
memcpy(x1->encap, x->encap, sizeof(*x1->encap));
else if (x->encap || x1->encap)
goto fail;
if (x->coaddr && x1->coaddr) {
memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
}
@ -1552,6 +1557,8 @@ out:
x->km.state = XFRM_STATE_DEAD;
__xfrm_state_put(x);
}
fail:
spin_unlock_bh(&x1->lock);
xfrm_state_put(x1);


@ -272,6 +272,46 @@ static struct bpf_test tests[] = {
.errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT,
},
{
"arsh32 on imm",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "BPF_ARSH not supported for 32 bit ALU",
},
{
"arsh32 on reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "BPF_ARSH not supported for 32 bit ALU",
},
{
"arsh64 on imm",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"arsh64 on reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"no bpf_exit",
.insns = {