Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (28 commits)
  drivers/isdn/hardware/mISDN: Use GFP_ATOMIC when a lock is held
  ksz884x: Add missing validate_addr hook
  ksz884x: convert to netdev_tx_t
  virtio-net: pass gfp to add_buf
  be2net: convert hdr.timeout in be_cmd_loopback_test() to le32
  can: mpc5xxx_can.c: Fix build failure
  net/ipv4/tcp_input.c: fix compilation breakage when FASTRETRANS_DEBUG > 1
  net: sock_queue_err_skb() dont mess with sk_forward_alloc
  netfilter: xtables: stackptr should be percpu
  netfilter: don't xt_jumpstack_alloc twice in xt_register_table
  greth: Fix build after OF device conversions.
  net: fix sk_forward_alloc corruptions
  Phonet: listening socket lock protects the connected socket list
  caif: unlock on error path in cfserl_receive()
  be2net: remove superfluous externs
  be2net: add unlock on error path
  net/rds: Add missing mutex_unlock
  drivers/isdn/hardware/mISDN: Add missing spin_unlock
  fs_enet: Adjust BDs after tx error
  skb: make skb_recycle_check() return a bool value
  ...
commit 076dab234d
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -97,8 +97,10 @@ static int write_reg(struct hfcsusb *hw, __u8 reg, __u8 val)
                         hw->name, __func__, reg, val);
 
         spin_lock(&hw->ctrl_lock);
-        if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE)
+        if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) {
+                spin_unlock(&hw->ctrl_lock);
                 return 1;
+        }
         buf = &hw->ctrl_buff[hw->ctrl_in_idx];
         buf->hfcs_reg = reg;
         buf->reg_val = val;
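The hfcsusb fix above is the simplest instance of a pattern this series fixes repeatedly: every early return out of a spinlocked section must drop the lock first, or the next spin_lock() on that lock spins forever. A minimal sketch of the rule, with hypothetical names rather than the driver's real ones:

    struct my_queue {
            spinlock_t lock;
            unsigned int count;
            u8 buf[16];
    };

    static int queue_byte(struct my_queue *q, u8 val)
    {
            spin_lock(&q->lock);
            if (q->count >= sizeof(q->buf)) {
                    spin_unlock(&q->lock);  /* the missing line in the bug */
                    return -EBUSY;
            }
            q->buf[q->count++] = val;
            spin_unlock(&q->lock);
            return 0;
    }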
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -320,12 +320,12 @@ inittiger(struct tiger_hw *card)
                 return -ENOMEM;
         }
         for (i = 0; i < 2; i++) {
-                card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_KERNEL);
+                card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
                 if (!card->bc[i].hsbuf) {
                         pr_info("%s: no B%d send buffer\n", card->name, i + 1);
                         return -ENOMEM;
                 }
-                card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_KERNEL);
+                card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
                 if (!card->bc[i].hrbuf) {
                         pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
                         return -ENOMEM;
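Per the commit subject, inittiger() runs with a spinlock held, and GFP_KERNEL allocations are allowed to sleep while reclaiming memory; sleeping in atomic context triggers "BUG: scheduling while atomic". GFP_ATOMIC never sleeps, at the price of failing more readily, so the existing -ENOMEM paths stay load-bearing. The rule, sketched with hypothetical names:

    /* allocating while a spinlock is held: GFP_ATOMIC is mandatory */
    static int grab_dma_buffer(struct my_card *card, size_t size)
    {
            unsigned long flags;
            void *buf;

            spin_lock_irqsave(&card->lock, flags);
            buf = kmalloc(size, GFP_ATOMIC);  /* GFP_KERNEL could sleep here */
            if (!buf) {
                    spin_unlock_irqrestore(&card->lock, flags);
                    return -ENOMEM;
            }
            card->dma_buf = buf;
            spin_unlock_irqrestore(&card->lock, flags);
            return 0;
    }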
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1429,7 +1429,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
         wrb = wrb_from_mccq(adapter);
         if (!wrb) {
                 status = -EBUSY;
-                goto err;
+                goto err_unlock;
         }
         req = cmd->va;
         sge = nonembedded_sgl(wrb);
@@ -1457,7 +1457,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
         else
                 status = adapter->flash_status;
 
-err:
+        return status;
+
+err_unlock:
+        spin_unlock_bh(&adapter->mcc_lock);
         return status;
 }
@@ -1497,7 +1500,7 @@ err:
         return status;
 }
 
-extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
                         struct be_dma_mem *nonemb_cmd)
 {
         struct be_mcc_wrb *wrb;
@@ -1590,7 +1593,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
 
         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                         OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
-        req->hdr.timeout = 4;
+        req->hdr.timeout = cpu_to_le32(4);
 
         req->pattern = cpu_to_le64(pattern);
         req->src_port = cpu_to_le32(port_num);
@@ -1662,7 +1665,7 @@ err:
         return status;
 }
 
-extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
                         struct be_dma_mem *nonemb_cmd)
 {
         struct be_mcc_wrb *wrb;
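The timeout fix matters because req->hdr crosses the host/device boundary and the adapter reads the field as little-endian. cpu_to_le32() is a byte-for-byte no-op on little-endian hosts, which is exactly why a missing conversion survives testing on x86 and only misbehaves on big-endian machines. The convention in miniature (hypothetical struct, not the driver's):

    struct wire_hdr {
            __le32 timeout;                       /* device-defined little-endian */
    };

    static void set_timeout(struct wire_hdr *hdr, u32 seconds)
    {
            hdr->timeout = cpu_to_le32(seconds);  /* swap on BE, no-op on LE */
    }

Typing the field as __le32 also lets sparse flag any bare assignment that skips the conversion.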
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -73,7 +73,7 @@ static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
         else
                 *mscan_clksrc = MSCAN_CLKSRC_XTAL;
 
-        freq = mpc5xxx_get_bus_frequency(ofdev->node);
+        freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
         if (!freq)
                 return 0;
 
@@ -152,7 +152,7 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
         }
 
         /* Determine the MSCAN device index from the physical address */
-        pval = of_get_property(ofdev->node, "reg", &plen);
+        pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
         BUG_ON(!pval || plen < sizeof(*pval));
         clockidx = (*pval & 0x80) ? 1 : 0;
         if (*pval & 0x2000)
@@ -168,11 +168,11 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
          */
         if (clock_name && !strcmp(clock_name, "ip")) {
                 *mscan_clksrc = MSCAN_CLKSRC_IPS;
-                freq = mpc5xxx_get_bus_frequency(ofdev->node);
+                freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
         } else {
                 *mscan_clksrc = MSCAN_CLKSRC_BUS;
 
-                pval = of_get_property(ofdev->node,
+                pval = of_get_property(ofdev->dev.of_node,
                                 "fsl,mscan-clock-divider", &plen);
                 if (pval && plen == sizeof(*pval))
                         clockdiv = *pval;
@@ -251,7 +251,7 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
                                         const struct of_device_id *id)
 {
         struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
-        struct device_node *np = ofdev->node;
+        struct device_node *np = ofdev->dev.of_node;
         struct net_device *dev;
         struct mscan_priv *priv;
         void __iomem *base;
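The build failure came from the 2.6.35 OF-device rework: the device-tree node pointer moved from struct of_device itself into its embedded struct device, so ofdev->node no longer exists and drivers read ofdev->dev.of_node instead. The access pattern after the change, as a hedged sketch:

    static int __devinit my_probe(struct of_device *ofdev,
                                  const struct of_device_id *id)
    {
            /* formerly ofdev->node; the field now lives in struct device */
            struct device_node *np = ofdev->dev.of_node;

            return np ? 0 : -ENODEV;
    }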
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -504,17 +504,54 @@ static int get_regs_len(struct net_device *dev)
 }
 
 /* Some transmit errors cause the transmitter to shut
- * down.  We now issue a restart transmit.  Since the
- * errors close the BD and update the pointers, the restart
- * _should_ pick up without having to reset any of our
- * pointers either.  Also, To workaround 8260 device erratum
- * CPM37, we must disable and then re-enable the transmitter
- * following a Late Collision, Underrun, or Retry Limit error.
+ * down.  We now issue a restart transmit.
+ * Also, to work around 8260 device erratum CPM37, we must
+ * disable and then re-enable the transmitter following a
+ * Late Collision, Underrun, or Retry Limit error.
+ * In addition, tbptr may point beyond BDs still marked
+ * as ready due to internal pipelining, so we need to look back
+ * through the BDs and adjust tbptr to point to the last BD
+ * marked as ready.  This may result in some buffers being
+ * retransmitted.
  */
 static void tx_restart(struct net_device *dev)
 {
         struct fs_enet_private *fep = netdev_priv(dev);
         fcc_t __iomem *fccp = fep->fcc.fccp;
+        const struct fs_platform_info *fpi = fep->fpi;
+        fcc_enet_t __iomem *ep = fep->fcc.ep;
+        cbd_t __iomem *curr_tbptr;
+        cbd_t __iomem *recheck_bd;
+        cbd_t __iomem *prev_bd;
+        cbd_t __iomem *last_tx_bd;
+
+        last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));
+
+        /* get the current bd held in TBPTR and scan back from this point */
+        recheck_bd = curr_tbptr = (cbd_t __iomem *)
+                ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) +
+                fep->ring_base);
+
+        prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1;
+
+        /* Move through the bds in reverse, look for the earliest buffer
+         * that is not ready.  Adjust TBPTR to the following buffer */
+        while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) {
+                /* Go back one buffer */
+                recheck_bd = prev_bd;
+
+                /* update the previous buffer */
+                prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1;
+
+                /* We should never see all bds marked as ready, check anyway */
+                if (recheck_bd == curr_tbptr)
+                        break;
+        }
+
+        /* Now update the TBPTR and dirty flag to the current buffer */
+        W32(ep, fen_genfcc.fcc_tbptr,
+                (uint)(((void *)recheck_bd - fep->ring_base) +
+                fep->ring_mem_addr));
+        fep->dirty_tx = recheck_bd;
 
         C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
         udelay(10);
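tx_restart() treats the BD table as a circular ring: stepping "back" from the first descriptor wraps around to the last one, and the walk stops either at the earliest descriptor still marked ready or, defensively, after a full lap. The wraparound step in isolation, over a generic descriptor type (hypothetical, not the driver's code):

    /* previous element in a ring of n descriptors, with wraparound */
    static struct desc *ring_prev(struct desc *base, unsigned int n,
                                  struct desc *cur)
    {
            return (cur == base) ? base + (n - 1) : cur - 1;
    }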
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1607,14 +1607,13 @@ static struct of_device_id greth_of_match[] = {
 MODULE_DEVICE_TABLE(of, greth_of_match);
 
 static struct of_platform_driver greth_of_driver = {
-        .name = "grlib-greth",
-        .match_table = greth_of_match,
+        .driver = {
+                .name = "grlib-greth",
+                .owner = THIS_MODULE,
+                .of_match_table = greth_of_match,
+        },
         .probe = greth_of_probe,
         .remove = __devexit_p(greth_of_remove),
-        .driver = {
-                .owner = THIS_MODULE,
-                .name = "grlib-greth",
-        },
 };
 
 static int __init greth_init(void)
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -4854,7 +4854,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
  *
  * Return 0 if successful; otherwise an error code indicating failure.
  */
-static int netdev_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
 {
         struct dev_priv *priv = netdev_priv(dev);
         struct dev_info *hw_priv = priv->adapter;
@@ -6863,6 +6863,7 @@ static const struct net_device_ops netdev_ops = {
         .ndo_tx_timeout = netdev_tx_timeout,
         .ndo_change_mtu = netdev_change_mtu,
         .ndo_set_mac_address = netdev_set_mac_address,
+        .ndo_validate_addr = eth_validate_addr,
         .ndo_do_ioctl = netdev_ioctl,
         .ndo_set_rx_mode = netdev_set_rx_mode,
 #ifdef CONFIG_NET_POLL_CONTROLLER
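ndo_start_xmit handlers return netdev_tx_t, whose meaningful values are NETDEV_TX_OK (skb consumed) and NETDEV_TX_BUSY (not consumed; the core retries later). Declaring the handler with the real type instead of int lets the compiler catch drivers that return raw error codes the stack would misread. A minimal sketch with a hypothetical ring-full helper:

    static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            if (my_tx_ring_full(dev)) {     /* hypothetical helper */
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;  /* skb not consumed, requeued */
            }
            /* ... hand the skb to the hardware ... */
            return NETDEV_TX_OK;            /* skb consumed */
    }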
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -340,7 +340,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
 
         skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
 
-        err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
+        err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
         if (err < 0)
                 dev_kfree_skb(skb);
 
@@ -385,8 +385,8 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
 
         /* chain first in list head */
         first->private = (unsigned long)list;
-        err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
-                                first);
+        err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+                                    first, gfp);
         if (err < 0)
                 give_pages(vi, first);
 
@@ -404,7 +404,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
 
         sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
 
-        err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
+        err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
         if (err < 0)
                 give_pages(vi, page);
 
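The receive ring is refilled from two contexts: NAPI polling, which is atomic and must use GFP_ATOMIC, and the deferred refill work item, which may sleep and can use GFP_KERNEL. The skb and page allocations already honoured the caller's gfp_t; virtqueue_add_buf_gfp() extends that to whatever the virtqueue itself allocates internally. The plumbing pattern, sketched with hypothetical names:

    static int refill_one(struct my_rxq *q, gfp_t gfp)
    {
            void *buf = kmalloc(RX_BUF_SIZE, gfp);  /* caller chose context */

            if (!buf)
                    return -ENOMEM;
            return my_post_buffer(q, buf, gfp);     /* keep passing it down */
    }

    /* refill_one(q, GFP_KERNEL) from the workqueue,
     * refill_one(q, GFP_ATOMIC) from NAPI */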
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -739,17 +739,27 @@ err_out:
 static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
 {
         struct device *parent = aru->udev->dev.parent;
+        struct usb_device *udev;
+
+        /*
+         * Store a copy of the usb_device pointer locally.
+         * This is because device_release_driver initiates
+         * ar9170_usb_disconnect, which in turn frees our
+         * driver context (aru).
+         */
+        udev = aru->udev;
 
         complete(&aru->firmware_loading_complete);
 
         /* unbind anything failed */
         if (parent)
                 device_lock(parent);
-        device_release_driver(&aru->udev->dev);
+
+        device_release_driver(&udev->dev);
         if (parent)
                 device_unlock(parent);
 
-        usb_put_dev(aru->udev);
+        usb_put_dev(udev);
 }
 
 static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
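This is the textbook use-after-free shape: a call that can tear down the very context being held (device_release_driver() ends up in ar9170_usb_disconnect(), which frees aru), followed by further dereferences of that context. Copying what is still needed into locals before the teardown call is the standard cure. A hedged sketch:

    struct my_ctx {
            struct usb_device *udev;
    };

    static void firmware_failed(struct my_ctx *ctx)
    {
            struct usb_device *udev = ctx->udev;    /* copy out first */

            my_unbind_driver(ctx);  /* hypothetical; may free ctx */
            /* ctx is dead here -- touch only the local copy */
            usb_put_dev(udev);
    }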
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1198,7 +1198,7 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
                 int r;
 
                 ath_print(common, ATH_DBG_FATAL,
-                          "Unable to stop TxDMA. Reset HAL!\n");
+                          "Failed to stop TX DMA. Resetting hardware!\n");
 
                 spin_lock_bh(&sc->sc_resetlock);
                 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
@@ -1728,6 +1728,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
         } else
                 bf->bf_isnullfunc = false;
 
+        bf->bf_tx_aborted = false;
+
         return 0;
 }
 
@@ -1989,7 +1991,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
         int nbad = 0;
         int isaggr = 0;
 
-        if (bf->bf_tx_aborted)
+        if (bf->bf_lastbf->bf_tx_aborted)
                 return 0;
 
         isaggr = bf_isaggr(bf);
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -329,9 +329,8 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
         /* create the exported radio header */
 
         /* radiotap header */
-        radiotap_hdr.hdr.it_version = 0;
-        /* XXX must check this value for pad */
-        radiotap_hdr.hdr.it_pad = 0;
+        memset(&radiotap_hdr, 0, sizeof(radiotap_hdr));
+        /* XXX must check radiotap_hdr.hdr.it_pad for pad */
         radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr));
         radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT);
         radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate);
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -413,7 +413,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
          */
         rt2x00_desc_read(txi, 0, &word);
         rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
-                           skb->len - TXINFO_DESC_SIZE);
+                           skb->len + TXWI_DESC_SIZE);
         rt2x00_set_field32(&word, TXINFO_W0_WIV,
                            !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
         rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -625,9 +625,12 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
                 ssb_printk(KERN_ERR PFX "No SPROM available!\n");
                 return -ENODEV;
         }
-
-        bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
-                SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
+        if (bus->chipco.dev) {  /* can be unavailable! */
+                bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
+                        SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
+        } else {
+                bus->sprom_offset = SSB_SPROM_BASE1;
+        }
 
         buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
         if (!buf)
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -185,6 +185,7 @@ bool ssb_is_sprom_available(struct ssb_bus *bus)
         /* this routine differs from specs as we do not access SPROM directly
            on PCMCIA */
         if (bus->bustype == SSB_BUSTYPE_PCI &&
+            bus->chipco.dev &&  /* can be unavailable! */
             bus->chipco.dev->id.revision >= 31)
                 return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM;
 
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -397,7 +397,7 @@ struct xt_table_info {
          * @stacksize jumps (number of user chains) can possibly be made.
          */
         unsigned int stacksize;
-        unsigned int *stackptr;
+        unsigned int __percpu *stackptr;
         void ***jumpstack;
         /* ipt_entry tables: one per CPU */
         /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -501,7 +501,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
         return __alloc_skb(size, priority, 1, -1);
 }
 
-extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
+extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1524,20 +1524,7 @@ extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
 
 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
-static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
-{
-        /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
-           number of warnings when compiling with -W --ANK
-         */
-        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-            (unsigned)sk->sk_rcvbuf)
-                return -ENOMEM;
-        skb_set_owner_r(skb, sk);
-        skb_queue_tail(&sk->sk_error_queue, skb);
-        if (!sock_flag(sk, SOCK_DEAD))
-                sk->sk_data_ready(sk, skb->len);
-        return 0;
-}
+extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 
 /*
  *      Recover an error report and clear atomically
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,16 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
         u8 stx = CFSERL_STX;
         int ret;
         u16 expectlen = 0;
+
         caif_assert(newpkt != NULL);
         spin_lock(&layr->sync);
 
         if (layr->incomplete_frm != NULL) {
-
                 layr->incomplete_frm =
                     cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
                 pkt = layr->incomplete_frm;
-                if (pkt == NULL)
+                if (pkt == NULL) {
+                        spin_unlock(&layr->sync);
                         return -ENOMEM;
+                }
         } else {
                 pkt = newpkt;
         }
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
  *      reference count dropping and cleans up the skbuff as if it
  *      just came from __alloc_skb().
  */
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
         struct skb_shared_info *shinfo;
 
         if (irqs_disabled())
-                return 0;
+                return false;
 
         if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-                return 0;
+                return false;
 
         skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
         if (skb_end_pointer(skb) - skb->head < skb_size)
-                return 0;
+                return false;
 
         if (skb_shared(skb) || skb_cloned(skb))
-                return 0;
+                return false;
 
         skb_release_head_state(skb);
 
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
         skb->data = skb->head + NET_SKB_PAD;
         skb_reset_tail_pointer(skb);
 
-        return 1;
+        return true;
 }
 EXPORT_SYMBOL(skb_recycle_check);
 
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+        struct sock *sk = skb->sk;
+
+        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We don't mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+            (unsigned)sk->sk_rcvbuf)
+                return -ENOMEM;
+
+        skb_orphan(skb);
+        skb->sk = sk;
+        skb->destructor = sock_rmem_free;
+        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+        skb_queue_tail(&sk->sk_error_queue, skb);
+        if (!sock_flag(sk, SOCK_DEAD))
+                sk->sk_data_ready(sk, skb->len);
+        return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
                 struct skb_shared_hwtstamps *hwtstamps)
 {
@@ -2996,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
         memset(serr, 0, sizeof(*serr));
         serr->ee.ee_errno = ENOMSG;
         serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
         err = sock_queue_err_skb(sk, skb);
+
         if (err)
                 kfree_skb(skb);
 }
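The exported sock_queue_err_skb() pairs its charge to sk_rmem_alloc with a destructor that subtracts exactly that charge and nothing more. The old inline used skb_set_owner_r(), whose destructor also adjusts sk_forward_alloc; that field is only safe to touch under the socket lock, which these error paths do not hold, and that is what corrupted sk_forward_alloc. The invariant, reduced to a hedged sketch:

    /* rule: whatever the enqueue path adds, the skb destructor subtracts */
    static void rmem_only_free(struct sk_buff *skb)
    {
            atomic_sub(skb->truesize, &skb->sk->sk_rmem_alloc);
    }

    /* ...at queue time... */
    skb->destructor = rmem_only_free;               /* symmetric uncharge */
    atomic_add(skb->truesize, &sk->sk_rmem_alloc);  /* charge rmem only */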
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
         cpu = smp_processor_id();
         table_base = private->entries[cpu];
         jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
-        stackptr = &private->stackptr[cpu];
+        stackptr = per_cpu_ptr(private->stackptr, cpu);
         origptr = *stackptr;
 
         e = get_entry(table_base, private->hook_entry[hook]);
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
         if (sk->sk_family == AF_INET) {
                 printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
                        msg,
-                       &inet->daddr, ntohs(inet->dport),
+                       &inet->inet_daddr, ntohs(inet->inet_dport),
                        tp->snd_cwnd, tcp_left_out(tp),
                        tp->snd_ssthresh, tp->prior_ssthresh,
                        tp->packets_out);
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
                 struct ipv6_pinfo *np = inet6_sk(sk);
                 printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
                        msg,
-                       &np->daddr, ntohs(inet->dport),
+                       &np->daddr, ntohs(inet->inet_dport),
                        tp->snd_cwnd, tcp_left_out(tp),
                        tp->snd_ssthresh, tp->prior_ssthresh,
                        tp->packets_out);
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
         if (!inet->recverr) {
                 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
                         goto out;
-        } else {
+        } else
                 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-        }
+
         sk->sk_err = err;
         sk->sk_error_report(sk);
 out:
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
         cpu = smp_processor_id();
         table_base = private->entries[cpu];
         jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
-        stackptr = &private->stackptr[cpu];
+        stackptr = per_cpu_ptr(private->stackptr, cpu);
         origptr = *stackptr;
 
         e = get_entry(table_base, private->hook_entry[hook]);
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
 {
         int flags = 0;
 
-        if (fl->oif || rt6_need_strict(&fl->fl6_dst))
+        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
                 flags |= RT6_LOOKUP_F_IFACE;
 
         if (!ipv6_addr_any(&fl->fl6_src))
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -5,7 +5,7 @@
 #include <linux/nl80211.h>
 #include "ieee80211_i.h"
 
-enum ieee80211_chan_mode
+static enum ieee80211_chan_mode
 __ieee80211_get_channel_mode(struct ieee80211_local *local,
                              struct ieee80211_sub_if_data *ignore)
 {
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
                 vfree(info->jumpstack);
         else
                 kfree(info->jumpstack);
-        if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
-                vfree(info->stackptr);
-        else
-                kfree(info->stackptr);
+
+        free_percpu(info->stackptr);
 
         kfree(info);
 }
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
         unsigned int size;
         int cpu;
 
-        size = sizeof(unsigned int) * nr_cpu_ids;
-        if (size > PAGE_SIZE)
-                i->stackptr = vmalloc(size);
-        else
-                i->stackptr = kmalloc(size, GFP_KERNEL);
+        i->stackptr = alloc_percpu(unsigned int);
         if (i->stackptr == NULL)
                 return -ENOMEM;
-        memset(i->stackptr, 0, size);
 
         size = sizeof(void **) * nr_cpu_ids;
         if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
         struct xt_table_info *private;
         struct xt_table *t, *table;
 
-        ret = xt_jumpstack_alloc(newinfo);
-        if (ret < 0)
-                return ERR_PTR(ret);
-
         /* Don't add one object to multiple lists. */
         table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
         if (!table) {
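The stackptr conversion replaces a hand-rolled nr_cpu_ids array, with its vmalloc/kmalloc size switch and manual memset, by the dedicated percpu allocator, which hands back zeroed storage with one instance per possible CPU. The API in miniature:

    #include <linux/percpu.h>

    unsigned int __percpu *stackptr;

    stackptr = alloc_percpu(unsigned int);  /* zeroed, one copy per cpu */
    if (!stackptr)
            return -ENOMEM;

    /* each cpu reads and writes only its own instance */
    *per_cpu_ptr(stackptr, smp_processor_id()) = 0;

    free_percpu(stackptr);

The __percpu annotation lets sparse reject dereferences that bypass per_cpu_ptr(), and dropping the xt_jumpstack_alloc() call from xt_register_table() removes the double allocation named in the commit list.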
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1045,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk)
         lock_sock(sk);
         if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
                 skparent = pn->listener;
-                sk_del_node_init(sk);
                 release_sock(sk);
 
-                sk = skparent;
                 pn = pep_sk(skparent);
-                lock_sock(sk);
+                lock_sock(skparent);
+                sk_del_node_init(sk);
+                sk = skparent;
         }
         /* Unhash a listening sock only when it is closed
          * and all of its active connected pipes are closed. */
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
         err = rds_ib_setup_qp(conn);
         if (err) {
                 rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+                mutex_unlock(&conn->c_cm_lock);
                 goto out;
         }
 
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
         err = rds_iw_setup_qp(conn);
         if (err) {
                 rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+                mutex_unlock(&conn->c_cm_lock);
                 goto out;
         }
 