Commit 21a180cda0

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	net/ipv4/Kconfig
	net/ipv4/tcp_timer.c

@@ -255,18 +255,6 @@ static void uml_net_tx_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
-static int uml_net_set_mac(struct net_device *dev, void *addr)
-{
-	struct uml_net_private *lp = netdev_priv(dev);
-	struct sockaddr *hwaddr = addr;
-
-	spin_lock_irq(&lp->lock);
-	eth_mac_addr(dev, hwaddr->sa_data);
-	spin_unlock_irq(&lp->lock);
-
-	return 0;
-}
-
 static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
 {
 	dev->mtu = new_mtu;
@@ -373,7 +361,7 @@ static const struct net_device_ops uml_netdev_ops = {
 	.ndo_start_xmit		= uml_net_start_xmit,
 	.ndo_set_multicast_list	= uml_net_set_multicast_list,
 	.ndo_tx_timeout		= uml_net_tx_timeout,
-	.ndo_set_mac_address	= uml_net_set_mac,
+	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_change_mtu		= uml_net_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 };
@@ -472,7 +460,8 @@ static void eth_configure(int n, void *init, char *mac,
 	    ((*transport->user->init)(&lp->user, dev) != 0))
 		goto out_unregister;
 
-	eth_mac_addr(dev, device->mac);
+	/* don't use eth_mac_addr, it will not work here */
+	memcpy(dev->dev_addr, device->mac, ETH_ALEN);
 	dev->mtu = transport->user->mtu;
 	dev->netdev_ops = &uml_netdev_ops;
 	dev->ethtool_ops = &uml_net_ethtool_ops;

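The eth_configure() hunk replaces the eth_mac_addr() call with a plain memcpy() into dev->dev_addr. One plausible reading of the "will not work here" comment (a guess from the surrounding code, not from a commit message) is that set-MAC handlers such as eth_mac_addr() expect the address wrapped in a struct sockaddr, with the octets at sa_data, not a bare 6-byte buffer like device->mac. A minimal userspace sketch of that layout mismatch:

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	unsigned char mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct sockaddr sa;

	/* What a set-MAC handler expects: the address wrapped in a
	 * struct sockaddr, octets starting at sa_data. */
	memset(&sa, 0, sizeof(sa));
	memcpy(sa.sa_data, mac, sizeof(mac));

	printf("sa_data sits at offset %zu, not 0\n",
	       offsetof(struct sockaddr, sa_data));
	printf("octet 0 via the wrapped form: %02x\n",
	       (unsigned char)sa.sa_data[0]);
	/* Handing the bare mac[] array to code that reads sa_data would
	 * make it start from mac[2] instead of mac[0]. */
	return 0;
}
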
@@ -2942,6 +2942,9 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct vortex_private *vp = netdev_priv(dev);
 
+	if (!VORTEX_PCI(vp))
+		return;
+
 	wol->supported = WAKE_MAGIC;
 
 	wol->wolopts = 0;
@@ -2952,6 +2955,10 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct vortex_private *vp = netdev_priv(dev);
+
+	if (!VORTEX_PCI(vp))
+		return -EOPNOTSUPP;
+
 	if (wol->wolopts & ~WAKE_MAGIC)
 		return -EINVAL;
 
@@ -3201,6 +3208,9 @@ static void acpi_set_WOL(struct net_device *dev)
 		return;
 	}
 
+	if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
+		return;
+
 	/* Change the power state to D3; RxEnable doesn't take effect. */
 	pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
 }

@@ -2428,7 +2428,7 @@ config UGETH_TX_ON_DEMAND
 
 config MV643XX_ETH
 	tristate "Marvell Discovery (643XX) and Orion ethernet support"
-	depends on MV64X60 || PPC32 || PLAT_ORION
+	depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
 	select INET_LRO
 	select PHYLIB
 	help
@@ -2815,7 +2815,7 @@ config NIU
 
 config PASEMI_MAC
 	tristate "PA Semi 1/10Gbit MAC"
-	depends on PPC_PASEMI && PCI
+	depends on PPC_PASEMI && PCI && INET
 	select PHYLIB
 	select INET_LRO
 	help

@@ -364,9 +364,9 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
 
 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
-static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
 /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
-static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
 
 
@@ -1597,12 +1597,15 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
 		return 0; /* nothing to change */
 
 	de_link_down(de);
+	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
 	de_stop_rxtx(de);
 
 	de->media_type = new_media;
 	de->media_lock = media_lock;
 	de->media_advertise = ecmd->advertising;
 	de_set_media(de);
+	if (netif_running(de->dev))
+		de_start_rxtx(de);
 
 	return 0;
 }

@@ -858,11 +858,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 		if (r < 0)
 			return r;
 		len -= l;
-		if (!len)
+		if (!len) {
+			if (vq->log_ctx)
+				eventfd_signal(vq->log_ctx, 1);
 			return 0;
+		}
 	}
-	if (vq->log_ctx)
-		eventfd_signal(vq->log_ctx, 1);
 	/* Length written exceeds what we have stored. This is a bug. */
 	BUG();
 	return 0;

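The vhost_log_write() change makes sure the log eventfd is signalled on the early-return path, not only after the loop. For readers unfamiliar with the mechanism, here is a minimal userspace sketch of how an eventfd consumer observes such signals (plain eventfd(2); the vhost interface itself is not shown):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	uint64_t val = 1;

	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	/* Producer side: each signal adds to the eventfd counter, which is
	 * also what the kernel's eventfd_signal(ctx, 1) does. */
	(void)write(efd, &val, sizeof(val));
	(void)write(efd, &val, sizeof(val));

	/* Consumer side: read() returns the accumulated count and resets
	 * it; with no pending signal it would block instead. */
	(void)read(efd, &val, sizeof(val));
	printf("pending signals: %llu\n", (unsigned long long)val);

	close(efd);
	return 0;
}
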
@@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
 					   int offset,
 					   unsigned int len, __wsum *csump);
 
-extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
 			     int offset, int len);

@@ -199,6 +199,8 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol,
 		fl.fl_ip_sport = sport;
 		fl.fl_ip_dport = dport;
 		fl.proto = protocol;
+		if (inet_sk(sk)->transparent)
+			fl.flags |= FLOWI_FLAG_ANYSRC;
 		ip_rt_put(*rp);
 		*rp = NULL;
 		security_sk_classify_flow(sk, &fl);

@@ -24,8 +24,11 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 
 	if (vlan_dev)
 		skb->dev = vlan_dev;
-	else if (vlan_id)
-		goto drop;
+	else if (vlan_id) {
+		if (!(skb->dev->flags & IFF_PROMISC))
+			goto drop;
+		skb->pkt_type = PACKET_OTHERHOST;
+	}
 
 	return polling ? netif_receive_skb(skb) : netif_rx(skb);
 
@@ -101,8 +104,11 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 
 	if (vlan_dev)
 		skb->dev = vlan_dev;
-	else if (vlan_id)
-		goto drop;
+	else if (vlan_id) {
+		if (!(skb->dev->flags & IFF_PROMISC))
+			goto drop;
+		skb->pkt_type = PACKET_OTHERHOST;
+	}
 
 	for (p = napi->gro_list; p; p = p->next) {
 		unsigned long diffs;

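Both VLAN receive paths change the "no vlan device registered for this tag" case: instead of always dropping, the frame is kept when the underlying device is promiscuous and is marked PACKET_OTHERHOST, so the local stack still ignores it while packet taps can see it. A condensed userspace sketch of that decision (the names and the enum are illustrative, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

enum rx_verdict { RX_DELIVER, RX_DROP, RX_OTHERHOST };

/* Mirror of the patched logic: a registered vlan device takes the frame;
 * otherwise a tagged frame is dropped unless the base device is
 * promiscuous, in which case it is kept but flagged as "other host". */
static enum rx_verdict classify(bool have_vlan_dev, int vlan_id, bool promisc)
{
	if (have_vlan_dev)
		return RX_DELIVER;
	if (vlan_id) {
		if (!promisc)
			return RX_DROP;
		return RX_OTHERHOST;
	}
	return RX_DELIVER;	/* vlan_id 0: continue on the original device */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(true, 5, false),	/* 0: to the vlan device */
	       classify(false, 5, false),	/* 1: drop */
	       classify(false, 5, true));	/* 2: keep, PACKET_OTHERHOST */
	return 0;
}
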
@@ -329,7 +329,8 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 
 	mutex_lock(&virtio_9p_lock);
 	list_for_each_entry(chan, &virtio_chan_list, chan_list) {
-		if (!strncmp(devname, chan->tag, chan->tag_len)) {
+		if (!strncmp(devname, chan->tag, chan->tag_len) &&
+		    strlen(devname) == chan->tag_len) {
 			if (!chan->inuse) {
 				chan->inuse = true;
 				found = 1;

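The 9p transport fix is a classic strncmp() pitfall: comparing only chan->tag_len bytes lets any devname that merely starts with the tag match. Requiring strlen(devname) == chan->tag_len turns the prefix test into an exact match. A minimal illustration (the tag names are made up):

#include <stdio.h>
#include <string.h>

/* Exact-match helper equivalent to the patched test: the first tag_len
 * bytes must match and devname must be exactly tag_len long. */
static int tag_matches(const char *devname, const char *tag, size_t tag_len)
{
	return strncmp(devname, tag, tag_len) == 0 &&
	       strlen(devname) == tag_len;
}

int main(void)
{
	const char *tag = "p9fs";
	size_t tag_len = strlen(tag);

	/* Prefix-only comparison: both names "match", which is the bug. */
	printf("%d %d\n", strncmp("p9fs", tag, tag_len) == 0,
	       strncmp("p9fs_backup", tag, tag_len) == 0);
	/* Exact match: only the identical name matches. */
	printf("%d %d\n", tag_matches("p9fs", tag, tag_len),
	       tag_matches("p9fs_backup", tag, tag_len));
	return 0;
}
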
@@ -35,9 +35,10 @@
  *	in any case.
  */
 
-int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
 {
-	int size, err, ct;
+	int size, ct;
+	long err;
 
 	if (m->msg_namelen) {
 		if (mode == VERIFY_READ) {

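verify_iovec() now returns long, with err widened to match. The hunk itself does not show the motivating check, but the widened type points at the total byte count: summing many iov_len values into an int can overflow. A hedged userspace sketch of that failure mode and one way to cap it (not the kernel's exact policy):

#include <stdio.h>
#include <limits.h>
#include <sys/uio.h>

/* Sum iovec lengths, refusing totals that would not fit in an int -
 * the kind of cap the verify_iovec() change is concerned with. */
static long total_iov_len(const struct iovec *iov, int iovcnt)
{
	long total = 0;
	int i;

	for (i = 0; i < iovcnt; i++) {
		if (iov[i].iov_len > (size_t)INT_MAX - (size_t)total)
			return -1;	/* total would exceed INT_MAX */
		total += (long)iov[i].iov_len;
	}
	return total;
}

int main(void)
{
	struct iovec iov[2] = {
		{ .iov_base = NULL, .iov_len = 0x7fffffff },
		{ .iov_base = NULL, .iov_len = 0x10 },
	};

	printf("%ld\n", total_iov_len(iov, 1));	/* 2147483647 */
	printf("%ld\n", total_iov_len(iov, 2));	/* -1: over the cap */
	return 0;
}
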
@@ -141,10 +141,10 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 		sk->sk_write_pending++;
-		sk_wait_event(sk, &current_timeo, !sk->sk_err &&
-						  !(sk->sk_shutdown & SEND_SHUTDOWN) &&
-						  sk_stream_memory_free(sk) &&
-						  vm_wait);
+		sk_wait_event(sk, &current_timeo, sk->sk_err ||
+						  (sk->sk_shutdown & SEND_SHUTDOWN) ||
+						  (sk_stream_memory_free(sk) &&
+						  !vm_wait));
 		sk->sk_write_pending--;
 
 		if (vm_wait) {

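The sk_stream_wait_memory() hunk rewrites the predicate handed to sk_wait_event(). Helpers of this shape take the condition on which to stop waiting, so the polarity of the expression decides whether the caller sleeps or returns; the old form waited on roughly the opposite set of events (error, shutdown, memory state) from what ends the wait after the fix. A toy illustration of why predicate polarity matters (this is not the kernel macro, and the real condition also involves vm_wait handling):

#include <stdio.h>
#include <stdbool.h>

/* Toy wait helper: spin until the stop condition holds or the budget
 * runs out; returns the final value of the condition. */
static bool wait_until(bool (*stop)(int), int state, int *budget)
{
	while (!stop(state) && *budget > 0)
		(*budget)--;
	return stop(state);
}

static bool memory_free(int state)     { return state != 0; }
static bool memory_not_free(int state) { return state == 0; }

int main(void)
{
	int budget;

	/* Correct polarity: memory is already free, so no waiting happens. */
	budget = 100;
	printf("stop=%d budget_left=%d\n",
	       wait_until(memory_free, 1, &budget), budget);

	/* Inverted polarity: the helper burns the whole budget even though
	 * the event being waited for has already occurred. */
	budget = 100;
	printf("stop=%d budget_left=%d\n",
	       wait_until(memory_not_free, 1, &budget), budget);
	return 0;
}
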
@@ -223,7 +223,7 @@ config NET_IPGRE_DEMUX
 
 config NET_IPGRE
 	tristate "IP: GRE tunnels over IP"
-	depends on NET_IPGRE_DEMUX
+	depends on (IPV6 || IPV6=n) && NET_IPGRE_DEMUX
 	help
 	  Tunneling means encapsulating data of one protocol type within
 	  another protocol and sending it over a channel that understands the
@@ -419,7 +419,7 @@ config INET_XFRM_MODE_BEET
 	  If unsure, say Y.
 
 config INET_LRO
-	bool "Large Receive Offload (ipv4/tcp)"
+	tristate "Large Receive Offload (ipv4/tcp)"
 	default y
 	---help---
 	  Support for Large Receive Offload (ipv4/tcp).

@@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 	int mark = 0;
 
 
-	if (len == 8 || IGMP_V2_SEEN(in_dev)) {
+	if (len == 8) {
 		if (ih->code == 0) {
 			/* Alas, old v1 router presents here. */
 
@@ -856,6 +856,18 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 		igmpv3_clear_delrec(in_dev);
 	} else if (len < 12) {
 		return;	/* ignore bogus packet; freed by caller */
+	} else if (IGMP_V1_SEEN(in_dev)) {
+		/* This is a v3 query with v1 queriers present */
+		max_delay = IGMP_Query_Response_Interval;
+		group = 0;
+	} else if (IGMP_V2_SEEN(in_dev)) {
+		/* this is a v3 query with v2 queriers present;
+		 * Interpretation of the max_delay code is problematic here.
+		 * A real v2 host would use ih_code directly, while v3 has a
+		 * different encoding. We use the v3 encoding as more likely
+		 * to be intended in a v3 query.
+		 */
+		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
 	} else { /* v3 */
 		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
 			return;

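The new IGMP_V2_SEEN branch deliberately reuses the v3 encoding through IGMPV3_MRC(). In IGMPv3 (RFC 3376) the Max Resp Code is a small floating-point value: below 128 it is taken literally, otherwise a 3-bit exponent and 4-bit mantissa are unpacked as (mant | 0x10) << (exp + 3), all in units of 1/10 second (the kernel then scales by HZ/IGMP_TIMER_SCALE). A worked userspace decoder for that layout:

#include <stdio.h>

/* RFC 3376 Max Resp Code decoding: values below 128 are literal, larger
 * values encode exp (bits 4-6) and mant (bits 0-3).  The result is in
 * tenths of a second. */
static unsigned int igmpv3_mrc(unsigned int code)
{
	unsigned int mant, exp;

	if (code < 128)
		return code;
	mant = code & 0x0f;
	exp = (code >> 4) & 0x07;
	return (mant | 0x10) << (exp + 3);
}

int main(void)
{
	/* 100  -> 100 tenths = 10.0 s (literal)
	 * 0x80 -> 0x10 << 3  = 128 tenths = 12.8 s
	 * 0xff -> 0x1f << 10 = 31744 tenths = 3174.4 s */
	printf("%u %u %u\n", igmpv3_mrc(100), igmpv3_mrc(0x80), igmpv3_mrc(0xff));
	return 0;
}
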
@@ -1232,7 +1232,7 @@ restart:
 	}
 
 	if (net_ratelimit())
-		printk(KERN_WARNING "Neighbour table overflow.\n");
+		printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
 	rt_drop(rt);
 	return -ENOBUFS;
 }

@@ -943,7 +943,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	sg = sk->sk_route_caps & NETIF_F_SG;
 
 	while (--iovlen >= 0) {
-		int seglen = iov->iov_len;
+		size_t seglen = iov->iov_len;
 		unsigned char __user *from = iov->iov_base;
 
 		iov++;

@@ -2532,7 +2532,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
 		cnt += tcp_skb_pcount(skb);
 
 		if (cnt > packets) {
-			if (tcp_is_sack(tp) || (oldcnt >= packets))
+			if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+			    (oldcnt >= packets))
 				break;
 
 			mss = skb_shinfo(skb)->gso_size;

@@ -135,13 +135,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 
 /* This function calculates a "timeout" which is equivalent to the timeout of a
  * TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
+ * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+ * syn_set flag is set.
  */
 static bool retransmits_timed_out(struct sock *sk,
 				  unsigned int boundary,
-				  unsigned int timeout)
+				  unsigned int timeout,
+				  bool syn_set)
 {
 	unsigned int linear_backoff_thresh, start_ts;
+	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
 
 	if (!inet_csk(sk)->icsk_retransmits)
 		return false;
@@ -152,12 +155,12 @@ static bool retransmits_timed_out(struct sock *sk,
 	start_ts = tcp_sk(sk)->retrans_stamp;
 
 	if (likely(timeout == 0)) {
-		linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
 
 		if (boundary <= linear_backoff_thresh)
-			timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
+			timeout = ((2 << boundary) - 1) * rto_base;
 		else
-			timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
+			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
 				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 	}
 	return (tcp_time_stamp - start_ts) >= timeout;
@@ -168,14 +171,15 @@ static int tcp_write_timeout(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int retry_until;
-	bool do_reset;
+	bool do_reset, syn_set = 0;
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits)
 			dst_negative_advice(sk);
 		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+		syn_set = 1;
 	} else {
-		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
+		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
 
@@ -188,7 +192,7 @@ static int tcp_write_timeout(struct sock *sk)
 
 		retry_until = tcp_orphan_retries(sk, alive);
 		do_reset = alive ||
-			!retransmits_timed_out(sk, retry_until, 0);
+			!retransmits_timed_out(sk, retry_until, 0, 0);
 
 		if (tcp_out_of_resources(sk, do_reset))
 			return 1;
@@ -196,8 +200,7 @@ static int tcp_write_timeout(struct sock *sk)
 	}
 
 	if (retransmits_timed_out(sk, retry_until,
-				  (1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV) ? 0 :
-				  icsk->icsk_user_timeout)) {
+				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
 		/* Has it gone just too far? */
 		tcp_write_err(sk);
 		return 1;
@@ -439,7 +442,7 @@ out_reset_timer:
 		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
 	}
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
+	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
 		__sk_dst_reset(sk);
 
 out:;

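retransmits_timed_out() now picks its backoff base from syn_set: TCP_TIMEOUT_INIT for SYNs, TCP_RTO_MIN otherwise. The ((2 << boundary) - 1) * rto_base expression is just the closed form of the summed, exponentially doubled RTOs, with TCP_RTO_MAX capping the later terms. A standalone rendering of the same arithmetic (the constants assume HZ=1000 and the values these macros had around this kernel, so treat them as illustrative):

#include <stdio.h>

#define HZ			1000u
#define TCP_RTO_MIN		(HZ / 5)	/* 200 ms (illustrative) */
#define TCP_RTO_MAX		(120u * HZ)	/* 120 s (illustrative) */
#define TCP_TIMEOUT_INIT	(3u * HZ)	/* 3 s for SYNs (illustrative) */

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Same shape as the patched helper: total time covered by "boundary"
 * retransmissions that start at rto_base and double up to TCP_RTO_MAX. */
static unsigned int backoff_timeout(unsigned int boundary, int syn_set)
{
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
	unsigned int thresh = ilog2_u(TCP_RTO_MAX / rto_base);

	if (boundary <= thresh)
		return ((2 << boundary) - 1) * rto_base;
	return ((2 << thresh) - 1) * rto_base +
	       (boundary - thresh) * TCP_RTO_MAX;
}

int main(void)
{
	/* Three data retransmissions: 200 + 400 + 800 + 1600 ms summed. */
	printf("data, boundary=3: %u ms\n", backoff_timeout(3, 0));
	/* Five SYN retransmissions starting at 3 s: 3+6+12+24+48+96 s. */
	printf("syn,  boundary=5: %u ms\n", backoff_timeout(5, 1));
	return 0;
}
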
@@ -670,7 +670,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
 
 			if (net_ratelimit())
 				printk(KERN_WARNING
-				       "Neighbour table overflow.\n");
+				       "ipv6: Neighbour table overflow.\n");
 			dst_free(&rt->dst);
 			return NULL;
 		}
@@ -1559,14 +1559,13 @@ out:
  *	i.e. Path MTU discovery
  */
 
-void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
-			struct net_device *dev, u32 pmtu)
+static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
+			     struct net *net, u32 pmtu, int ifindex)
 {
 	struct rt6_info *rt, *nrt;
-	struct net *net = dev_net(dev);
 	int allfrag = 0;
 
-	rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
+	rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
 	if (rt == NULL)
 		return;
 
@@ -1634,6 +1633,27 @@ out:
 	dst_release(&rt->dst);
 }
 
+void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
+			struct net_device *dev, u32 pmtu)
+{
+	struct net *net = dev_net(dev);
+
+	/*
+	 * RFC 1981 states that a node "MUST reduce the size of the packets it
+	 * is sending along the path" that caused the Packet Too Big message.
+	 * Since it's not possible in the general case to determine which
+	 * interface was used to send the original packet, we update the MTU
+	 * on the interface that will be used to send future packets. We also
+	 * update the MTU on the interface that received the Packet Too Big in
+	 * case the original packet was forced out that interface with
+	 * SO_BINDTODEVICE or similar. This is the next best thing to the
+	 * correct behaviour, which would be to update the MTU on all
+	 * interfaces.
+	 */
+	rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
+	rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
+}
+
 /*
  *	Misc support functions
  */

@@ -507,12 +507,13 @@ static void pipe_grant_credits(struct sock *sk)
 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 {
 	struct pep_sock *pn = pep_sk(sk);
-	struct pnpipehdr *hdr = pnp_hdr(skb);
+	struct pnpipehdr *hdr;
 	int wake = 0;
 
 	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 		return -EINVAL;
 
+	hdr = pnp_hdr(skb);
 	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
 		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
 			(unsigned)hdr->data[0]);

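The Phonet fix simply delays pnp_hdr(skb) until after pskb_may_pull() has confirmed the header bytes are present (and the pointer therefore points at valid linear data). The general pattern, checking the available length before interpreting fixed-size header fields, in plain userspace C (the header layout below is made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct toy_hdr {		/* stand-in for a protocol header */
	uint8_t type;
	uint8_t code;
	uint16_t len;
};

/* Only interpret the header after verifying the buffer is big enough,
 * mirroring "pskb_may_pull() first, pnp_hdr() second". */
static int parse(const uint8_t *buf, size_t buf_len, struct toy_hdr *out)
{
	if (buf_len < sizeof(*out))
		return -1;			/* truncated: refuse to parse */
	memcpy(out, buf, sizeof(*out));		/* safe: length already checked */
	return 0;
}

int main(void)
{
	uint8_t short_pkt[2] = { 0x01, 0x02 };
	uint8_t full_pkt[4] = { 0x01, 0x02, 0x00, 0x04 };
	struct toy_hdr h;

	printf("short: %d\n", parse(short_pkt, sizeof(short_pkt), &h));
	printf("full: %d (type=%u)\n", parse(full_pkt, sizeof(full_pkt), &h), h.type);
	return 0;
}
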
@@ -543,16 +543,20 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
 		id = ntohs(hmacs->hmac_ids[i]);
 
 		/* Check the id is in the supported range */
-		if (id > SCTP_AUTH_HMAC_ID_MAX)
+		if (id > SCTP_AUTH_HMAC_ID_MAX) {
+			id = 0;
 			continue;
+		}
 
 		/* See is we support the id.  Supported IDs have name and
 		 * length fields set, so that we can allocated and use
 		 * them.   We can safely just check for name, for without the
 		 * name, we can't allocate the TFM.
 		 */
-		if (!sctp_hmac_list[id].hmac_name)
+		if (!sctp_hmac_list[id].hmac_name) {
+			id = 0;
 			continue;
+		}
 
 		break;
 	}

@@ -918,6 +918,11 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
 	/* Walk through the addrs buffer and count the number of addresses. */
 	addr_buf = kaddrs;
 	while (walk_size < addrs_size) {
+		if (walk_size + sizeof(sa_family_t) > addrs_size) {
+			kfree(kaddrs);
+			return -EINVAL;
+		}
+
 		sa_addr = (struct sockaddr *)addr_buf;
 		af = sctp_get_af_specific(sa_addr->sa_family);
 
@@ -1004,9 +1009,13 @@ static int __sctp_connect(struct sock* sk,
 	/* Walk through the addrs buffer and count the number of addresses. */
 	addr_buf = kaddrs;
 	while (walk_size < addrs_size) {
+		if (walk_size + sizeof(sa_family_t) > addrs_size) {
+			err = -EINVAL;
+			goto out_free;
+		}
+
 		sa_addr = (union sctp_addr *)addr_buf;
 		af = sctp_get_af_specific(sa_addr->sa.sa_family);
-		port = ntohs(sa_addr->v4.sin_port);
 
 		/* If the address family is not supported or if this address
 		 * causes the address buffer to overflow return EINVAL.
@@ -1016,6 +1025,8 @@ static int __sctp_connect(struct sock* sk,
 			goto out_free;
 		}
 
+		port = ntohs(sa_addr->v4.sin_port);
+
 		/* Save current address so we can work with it */
 		memcpy(&to, sa_addr, af->sockaddr_len);
 

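Both SCTP hunks add the same guard: before reading sa_family from the next entry in the user-supplied address buffer, make sure at least sizeof(sa_family_t) bytes remain, so a truncated trailing entry is rejected instead of being read past the end. A userspace sketch of that walk (simplified to IPv4-only entries; names are illustrative):

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Walk a packed buffer of sockaddr_in entries, refusing to read the
 * family field of a truncated trailing entry - the check the SCTP
 * hunks add before sctp_get_af_specific(). */
static int count_addrs(const void *buf, size_t buf_len)
{
	size_t walk = 0;
	int count = 0;

	while (walk < buf_len) {
		struct sockaddr_in sa;

		if (walk + sizeof(sa_family_t) > buf_len)
			return -1;		/* truncated family field */
		memcpy(&sa, (const char *)buf + walk, sizeof(sa_family_t));
		if (sa.sin_family != AF_INET)
			return -1;		/* unsupported in this sketch */
		if (walk + sizeof(sa) > buf_len)
			return -1;		/* entry overflows the buffer */
		walk += sizeof(sa);
		count++;
	}
	return count;
}

int main(void)
{
	struct sockaddr_in two[2] = {
		{ .sin_family = AF_INET },
		{ .sin_family = AF_INET },
	};

	printf("%d\n", count_addrs(two, sizeof(two)));		/* 2 */
	printf("%d\n", count_addrs(two, sizeof(two[0]) + 1));	/* -1: stray byte */
	return 0;
}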