Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net: Add a flow_cache_flush_deferred function
  ipv4: reintroduce route cache garbage collector
  net: have ipconfig not wait if no dev is available
  sctp: Do not account for sizeof(struct sk_buff) in estimated rwnd
  asix: new device id
  davinci-cpdma: fix locking issue in cpdma_chan_stop
  sctp: fix incorrect overflow check on autoclose
  r8169: fix Config2 MSIEnable bit setting.
  llc: llc_cmsg_rcv was getting called after sk_eat_skb.
  net: bpf_jit: fix an off-one bug in x86_64 cond jump target
  iwlwifi: update SCD BC table for all SCD queues
  Revert "Bluetooth: Revert: Fix L2CAP connection establishment"
  Bluetooth: Clear RFCOMM session timer when disconnecting last channel
  Bluetooth: Prevent uninitialized data access in L2CAP configuration
  iwlwifi: allow to switch to HT40 if not associated
  iwlwifi: tx_sync only on PAN context
  mwifiex: avoid double list_del in command cancel path
  ath9k: fix max phy rate at rate control init
  nfc: signedness bug in __nci_request()
  iwlwifi: do not set the sequence control bit is not needed
Linus Torvalds 2011-12-21 18:29:26 -08:00
Parents: d5ed5e48f4 c0ed1c14a7
Commit: ecefc36b41
27 changed files with 220 additions and 47 deletions


@@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
             break;
         }
         if (filter[i].jt != 0) {
-            if (filter[i].jf)
-                t_offset += is_near(f_offset) ? 2 : 6;
+            if (filter[i].jf && f_offset)
+                t_offset += is_near(f_offset) ? 2 : 5;
             EMIT_COND_JMP(t_op, t_offset);
             if (filter[i].jf)
                 EMIT_JMP(f_offset);


@@ -477,7 +477,6 @@ enum rtl_register_content {
     /* Config1 register p.24 */
     LEDS1 = (1 << 7),
     LEDS0 = (1 << 6),
-    MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
     Speed_down = (1 << 4),
     MEMMAP = (1 << 3),
     IOMAP = (1 << 2),
@@ -485,6 +484,7 @@ enum rtl_register_content {
     PMEnable = (1 << 0), /* Power Management Enable */
     /* Config2 register p. 25 */
+    MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
     PCI_Clock_66MHz = 0x01,
     PCI_Clock_33MHz = 0x00,
@@ -3426,22 +3426,24 @@ static const struct rtl_cfg_info {
 };
 /* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
                             const struct rtl_cfg_info *cfg)
 {
+    void __iomem *ioaddr = tp->mmio_addr;
     unsigned msi = 0;
     u8 cfg2;
     cfg2 = RTL_R8(Config2) & ~MSIEnable;
     if (cfg->features & RTL_FEATURE_MSI) {
-        if (pci_enable_msi(pdev)) {
-            dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+        if (pci_enable_msi(tp->pci_dev)) {
+            netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
         } else {
             cfg2 |= MSIEnable;
             msi = RTL_FEATURE_MSI;
         }
     }
-    RTL_W8(Config2, cfg2);
+    if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+        RTL_W8(Config2, cfg2);
     return msi;
 }
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         tp->features |= RTL_FEATURE_WOL;
     if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
         tp->features |= RTL_FEATURE_WOL;
-    tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+    tp->features |= rtl_try_msi(tp, cfg);
     RTL_W8(Cfg9346, Cfg9346_Lock);
     if (rtl_tbi_enabled(tp)) {


@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
     chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
     /* handle completed packets */
+    spin_unlock_irqrestore(&chan->lock, flags);
     do {
         ret = __cpdma_chan_process(chan);
         if (ret < 0)
             break;
     } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+    spin_lock_irqsave(&chan->lock, flags);
     /* remaining packets haven't been tx/rx'ed, clean them up */
     while (chan->head) {


@@ -1655,6 +1655,10 @@ static const struct usb_device_id products [] = {
     // ASIX 88772a
     USB_DEVICE(0x0db0, 0xa877),
     .driver_info = (unsigned long) &ax88772_info,
+}, {
+    // Asus USB Ethernet Adapter
+    USB_DEVICE (0x0b95, 0x7e2b),
+    .driver_info = (unsigned long) &ax88772_info,
 },
     { }, // END
 };


@@ -1271,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
     ath_rc_priv->max_valid_rate = k;
     ath_rc_sort_validrates(rate_table, ath_rc_priv);
-    ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+    ath_rc_priv->rate_max_phy = (k > 4) ?
+        ath_rc_priv->valid_rate_index[k-4] :
+        ath_rc_priv->valid_rate_index[k-1];
     ath_rc_priv->rate_table = rate_table;
     ath_dbg(common, ATH_DBG_CONFIG,


@@ -606,8 +606,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
         if (ctx->ht.enabled) {
             /* if HT40 is used, it should not change
              * after associated except channel switch */
-            if (iwl_is_associated_ctx(ctx) &&
-                !ctx->ht.is_40mhz)
+            if (!ctx->ht.is_40mhz ||
+                !iwl_is_associated_ctx(ctx))
                 iwlagn_config_ht40(conf, ctx);
         } else
             ctx->ht.is_40mhz = false;


@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
         tx_cmd->tid_tspec = qc[0] & 0xf;
         tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
     } else {
-        tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+            tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+        else
+            tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
     }
     iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);


@@ -2850,6 +2850,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
     int ret;
     u8 sta_id;
+    if (ctx->ctxid != IWL_RXON_CTX_PAN)
+        return 0;
     IWL_DEBUG_MAC80211(priv, "enter\n");
     mutex_lock(&priv->shrd->mutex);
@@ -2898,6 +2901,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
     struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
     struct iwl_rxon_context *ctx = vif_priv->ctx;
+    if (ctx->ctxid != IWL_RXON_CTX_PAN)
+        return;
     IWL_DEBUG_MAC80211(priv, "enter\n");
     mutex_lock(&priv->shrd->mutex);


@@ -1197,9 +1197,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
     iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
     /* Set up entry for this TFD in Tx byte-count array */
-    if (is_agg)
-        iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
-                le16_to_cpu(tx_cmd->len));
+    iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
     dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
                 DMA_BIDIRECTIONAL);


@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 {
     struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
     unsigned long cmd_flags;
-    unsigned long cmd_pending_q_flags;
     unsigned long scan_pending_q_flags;
     uint16_t cancel_scan_cmd = false;
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
         cmd_node = adapter->curr_cmd;
         cmd_node->wait_q_enabled = false;
         cmd_node->cmd_flag |= CMD_F_CANCELED;
-        spin_lock_irqsave(&adapter->cmd_pending_q_lock,
-                cmd_pending_q_flags);
-        list_del(&cmd_node->list);
-        spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
-                cmd_pending_q_flags);
         mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+        mwifiex_complete_cmd(adapter, adapter->curr_cmd);
         adapter->curr_cmd = NULL;
         spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
     }
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
         spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
     }
     adapter->cmd_wait_q.status = -1;
-    mwifiex_complete_cmd(adapter, adapter->curr_cmd);
 }
 /*


@@ -207,6 +207,7 @@ extern struct flow_cache_object *flow_cache_lookup(
         u8 dir, flow_resolve_t resolver, void *ctx);
 extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 #endif


@@ -241,6 +241,9 @@ extern struct sctp_globals {
      * bits is an indicator of when to send and window update SACK.
      */
     int rwnd_update_shift;
+    /* Threshold for autoclose timeout, in seconds. */
+    unsigned long max_autoclose;
 } sctp_globals;
 #define sctp_rto_initial (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
 #define sctp_auth_enable (sctp_globals.auth_enable)
 #define sctp_checksum_disable (sctp_globals.checksum_disable)
 #define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose (sctp_globals.max_autoclose)
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {


@@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
         goto encrypt;
 auth:
-    if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+    if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
         return 0;
     if (!hci_conn_auth(conn, sec_level, auth_type))


@@ -2152,7 +2152,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
     void *ptr = req->data;
     int type, olen;
     unsigned long val;
-    struct l2cap_conf_rfc rfc;
+    struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
     BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
@@ -2271,6 +2271,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
         }
     }
+    /* Use sane default values in case a misbehaving remote device
+     * did not send an RFC option.
+     */
+    rfc.mode = chan->mode;
+    rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+    rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+    rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+    BT_ERR("Expected RFC option was not found, using defaults");
 done:
     switch (rfc.mode) {
     case L2CAP_MODE_ERTM:


@@ -1146,6 +1146,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
             if (list_empty(&s->dlcs)) {
                 s->state = BT_DISCONN;
                 rfcomm_send_disc(s, 0);
+                rfcomm_session_clear_timer(s);
             }
             break;


@@ -358,6 +358,18 @@ void flow_cache_flush(void)
     put_online_cpus();
 }
+static void flow_cache_flush_task(struct work_struct *work)
+{
+    flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+    schedule_work(&flow_cache_flush_work);
+}
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
     struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);


@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
         }
     }
+    /* no point in waiting if we could not bring up at least one device */
+    if (!ic_first_dev)
+        goto have_carrier;
     /* wait for a carrier on at least one device */
     start = jiffies;
     while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {


@@ -120,6 +120,7 @@
 static int ip_rt_max_size;
 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_redirect_number __read_mostly = 9;
 static int ip_rt_redirect_load __read_mostly = HZ / 50;
@@ -133,6 +134,9 @@ static int ip_rt_min_advmss __read_mostly = 256;
 static int rt_chain_length_max __read_mostly = 20;
 static int redirect_genid;
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
 /*
  * Interface to generic destination cache.
  */
@@ -830,6 +834,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
     return ONE;
 }
+static void rt_check_expire(void)
+{
+    static unsigned int rover;
+    unsigned int i = rover, goal;
+    struct rtable *rth;
+    struct rtable __rcu **rthp;
+    unsigned long samples = 0;
+    unsigned long sum = 0, sum2 = 0;
+    unsigned long delta;
+    u64 mult;
+    delta = jiffies - expires_ljiffies;
+    expires_ljiffies = jiffies;
+    mult = ((u64)delta) << rt_hash_log;
+    if (ip_rt_gc_timeout > 1)
+        do_div(mult, ip_rt_gc_timeout);
+    goal = (unsigned int)mult;
+    if (goal > rt_hash_mask)
+        goal = rt_hash_mask + 1;
+    for (; goal > 0; goal--) {
+        unsigned long tmo = ip_rt_gc_timeout;
+        unsigned long length;
+        i = (i + 1) & rt_hash_mask;
+        rthp = &rt_hash_table[i].chain;
+        if (need_resched())
+            cond_resched();
+        samples++;
+        if (rcu_dereference_raw(*rthp) == NULL)
+            continue;
+        length = 0;
+        spin_lock_bh(rt_hash_lock_addr(i));
+        while ((rth = rcu_dereference_protected(*rthp,
+                lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+            prefetch(rth->dst.rt_next);
+            if (rt_is_expired(rth)) {
+                *rthp = rth->dst.rt_next;
+                rt_free(rth);
+                continue;
+            }
+            if (rth->dst.expires) {
+                /* Entry is expired even if it is in use */
+                if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+                    tmo >>= 1;
+                    rthp = &rth->dst.rt_next;
+                    /*
+                     * We only count entries on
+                     * a chain with equal hash inputs once
+                     * so that entries for different QOS
+                     * levels, and other non-hash input
+                     * attributes don't unfairly skew
+                     * the length computation
+                     */
+                    length += has_noalias(rt_hash_table[i].chain, rth);
+                    continue;
+                }
+            } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+                goto nofree;
+            /* Cleanup aged off entries. */
+            *rthp = rth->dst.rt_next;
+            rt_free(rth);
+        }
+        spin_unlock_bh(rt_hash_lock_addr(i));
+        sum += length;
+        sum2 += length*length;
+    }
+    if (samples) {
+        unsigned long avg = sum / samples;
+        unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+        rt_chain_length_max = max_t(unsigned long,
+                    ip_rt_gc_elasticity,
+                    (avg + 4*sd) >> FRACT_BITS);
+    }
+    rover = i;
+}
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+    rt_check_expire();
+    schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -3178,6 +3273,13 @@ static ctl_table ipv4_route_table[] = {
         .mode = 0644,
         .proc_handler = proc_dointvec_jiffies,
     },
+    {
+        .procname = "gc_interval",
+        .data = &ip_rt_gc_interval,
+        .maxlen = sizeof(int),
+        .mode = 0644,
+        .proc_handler = proc_dointvec_jiffies,
+    },
     {
         .procname = "redirect_load",
         .data = &ip_rt_redirect_load,
@@ -3388,6 +3490,11 @@ int __init ip_rt_init(void)
     devinet_init();
     ip_fib_init();
+    INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+    expires_ljiffies = jiffies;
+    schedule_delayed_work(&expires_work,
+        net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
     if (ip_rt_proc_init())
         printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM


@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
         copied += used;
         len -= used;
+        /* For non stream protcols we get one packet per recvmsg call */
+        if (sk->sk_type != SOCK_STREAM)
+            goto copy_uaddr;
         if (!(flags & MSG_PEEK)) {
             sk_eat_skb(sk, skb, 0);
             *seq = 0;
         }
-        /* For non stream protcols we get one packet per recvmsg call */
-        if (sk->sk_type != SOCK_STREAM)
-            goto copy_uaddr;
         /* Partial read */
         if (used + offset < skb->len)
             continue;
@@ -857,6 +857,12 @@ copy_uaddr:
     }
     if (llc_sk(sk)->cmsg_flags)
         llc_cmsg_rcv(msg, skb);
+    if (!(flags & MSG_PEEK)) {
+        sk_eat_skb(sk, skb, 0);
+        *seq = 0;
+    }
     goto out;
 }


@@ -69,7 +69,7 @@ static int __nci_request(struct nci_dev *ndev,
             __u32 timeout)
 {
     int rc = 0;
-    unsigned long completion_rc;
+    long completion_rc;
     ndev->req_status = NCI_REQ_PEND;


@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
     asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
     asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
     asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-        (unsigned long)sp->autoclose * HZ;
+        min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
     /* Initializes the timers */
     for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)


@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
     /* Keep track of how many bytes are in flight to the receiver. */
     asoc->outqueue.outstanding_bytes += datasize;
-    /* Update our view of the receiver's rwnd. Include sk_buff overhead
-     * while updating peer.rwnd so that it reduces the chances of a
-     * receiver running out of receive buffer space even when receive
-     * window is still open. This can happen when a sender is sending
-     * sending small messages.
-     */
-    datasize += sizeof(struct sk_buff);
+    /* Update our view of the receiver's rwnd. */
     if (datasize < rwnd)
         rwnd -= datasize;
     else


@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                 chunk->transport->flight_size -=
                         sctp_data_size(chunk);
             q->outstanding_bytes -= sctp_data_size(chunk);
-            q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                        sizeof(struct sk_buff));
+            q->asoc->peer.rwnd += sctp_data_size(chunk);
         }
         continue;
     }
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
          * (Section 7.2.4)), add the data size of those
          * chunks to the rwnd.
          */
-        q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                    sizeof(struct sk_buff));
+        q->asoc->peer.rwnd += sctp_data_size(chunk);
         q->outstanding_bytes -= sctp_data_size(chunk);
         if (chunk->transport)
             transport->flight_size -= sctp_data_size(chunk);


@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
     sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
     sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
+    /* Initialize maximum autoclose timeout. */
+    sctp_max_autoclose = INT_MAX / HZ;
     /* Initialize handle used for association ids. */
     idr_init(&sctp_assocs_id);


@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
         return -EINVAL;
     if (copy_from_user(&sp->autoclose, optval, optlen))
         return -EFAULT;
-    /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-    sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
     return 0;
 }


@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+    (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+    ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
         .extra1 = &one,
         .extra2 = &rwnd_scale_max,
     },
+    {
+        .procname = "max_autoclose",
+        .data = &sctp_max_autoclose,
+        .maxlen = sizeof(unsigned long),
+        .mode = 0644,
+        .proc_handler = &proc_doulongvec_minmax,
+        .extra1 = &max_autoclose_min,
+        .extra2 = &max_autoclose_max,
+    },
     { /* sentinel */ }
 };


@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
 {
     struct dst_entry *head, *next;
-    flow_cache_flush();
     spin_lock_bh(&xfrm_policy_sk_bundle_lock);
     head = xfrm_policy_sk_bundles;
     xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
     }
 }
+static void xfrm_garbage_collect(struct net *net)
+{
+    flow_cache_flush();
+    __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+    flow_cache_flush_deferred();
+    __xfrm_garbage_collect(net);
+}
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
     do {
@@ -2422,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
         if (likely(dst_ops->neigh_lookup == NULL))
             dst_ops->neigh_lookup = xfrm_neigh_lookup;
         if (likely(afinfo->garbage_collect == NULL))
-            afinfo->garbage_collect = __xfrm_garbage_collect;
+            afinfo->garbage_collect = xfrm_garbage_collect_deferred;
         xfrm_policy_afinfo[afinfo->family] = afinfo;
     }
     write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2516,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
     switch (event) {
     case NETDEV_DOWN:
-        __xfrm_garbage_collect(dev_net(dev));
+        xfrm_garbage_collect(dev_net(dev));
     }
     return NOTIFY_DONE;
 }
}