Merge branch 'nla_align-set-2'

Nicolas Dichtel says:

====================
netlink: align attributes when needed (patchset #2)

This is the continuation (series #2) of the work done to align netlink
attributes when they contain 64-bit fields.

In patch #3, I didn't modify the function ila_encap_nlsize(); I was waiting
for feedback on this patch: http://patchwork.ozlabs.org/patch/613766/
If it is approved, a follow-up will switch nla_total_size() to
nla_total_size_64bit() after net is merged into net-next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2016-04-25 15:09:12 -04:00
Parents d296ba60d8 2dad624e6d
Commit e7157f28ce
21 changed files with 233 additions and 144 deletions
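For context, the pattern applied across the diffs below is the one introduced by the u64 alignment helpers: reserve a *_PAD value in the uapi attribute enum, size 64-bit attributes with nla_total_size_64bit(), and emit them with nla_put_u64_64bit() so the kernel can insert a pad attribute whenever the 64-bit payload would otherwise land on an unaligned offset. A minimal sketch of that pattern, using a hypothetical FOO netlink family (the FOO_* names and helper functions are illustrative, not part of this commit):

#include <net/netlink.h>	/* nla_put_u64_64bit(), nla_total_size_64bit() */

/* uapi-style enum: FOO_ATTR_PAD is reserved so the kernel can emit an
 * explicit padding attribute in front of a 64-bit attribute whose payload
 * would otherwise start on a 4-byte-only boundary. */
enum {
	FOO_ATTR_UNSPEC,
	FOO_ATTR_COUNTER,	/* u64 */
	FOO_ATTR_PAD,
	__FOO_ATTR_MAX,
};
#define FOO_ATTR_MAX (__FOO_ATTR_MAX - 1)

/* Sizing: nla_total_size_64bit() also accounts for the possible pad
 * attribute, unlike plain nla_total_size(sizeof(u64)). */
static size_t foo_msg_size(void)
{
	return nla_total_size_64bit(sizeof(u64));	/* FOO_ATTR_COUNTER */
}

/* Filling: nla_put_u64_64bit() inserts FOO_ATTR_PAD first when needed so
 * the u64 payload ends up naturally aligned in the message. */
static int foo_fill_counter(struct sk_buff *skb, u64 counter)
{
	if (nla_put_u64_64bit(skb, FOO_ATTR_COUNTER, counter, FOO_ATTR_PAD))
		return -EMSGSIZE;
	return 0;
}

Each hunk below is an instance of this: a *_PAD enum value added to the userspace header, and the corresponding nla_put_u64()/nla_total_size() calls converted.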

View file

@ -92,6 +92,8 @@ enum {
IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
IEEE802154_ATTR_LLSEC_DEV_KEY_MODE,
IEEE802154_ATTR_PAD,
__IEEE802154_ATTR_MAX,
};

View file

@ -271,6 +271,7 @@ enum {
IFLA_BR_NF_CALL_IP6TABLES,
IFLA_BR_NF_CALL_ARPTABLES,
IFLA_BR_VLAN_DEFAULT_PVID,
IFLA_BR_PAD,
__IFLA_BR_MAX,
};
@ -313,6 +314,7 @@ enum {
IFLA_BRPORT_HOLD_TIMER,
IFLA_BRPORT_FLUSH,
IFLA_BRPORT_MULTICAST_ROUTER,
IFLA_BRPORT_PAD,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@ -666,6 +668,7 @@ enum {
IFLA_VF_STATS_TX_BYTES,
IFLA_VF_STATS_BROADCAST,
IFLA_VF_STATS_MULTICAST,
IFLA_VF_STATS_PAD,
__IFLA_VF_STATS_MAX,
};

View file

@ -14,6 +14,7 @@ enum {
ILA_ATTR_LOCATOR_MATCH, /* u64 */
ILA_ATTR_IFINDEX, /* s32 */
ILA_ATTR_DIR, /* u32 */
ILA_ATTR_PAD,
__ILA_ATTR_MAX,
};

View file

@ -435,6 +435,7 @@ enum {
IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */
IPVS_STATS_ATTR_INBPS, /* current in byte rate */
IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */
IPVS_STATS_ATTR_PAD,
__IPVS_STATS_ATTR_MAX,
};

View file

@ -143,6 +143,7 @@ enum {
L2TP_ATTR_RX_SEQ_DISCARDS, /* u64 */
L2TP_ATTR_RX_OOS_PACKETS, /* u64 */
L2TP_ATTR_RX_ERRORS, /* u64 */
L2TP_ATTR_STATS_PAD,
__L2TP_ATTR_STATS_MAX,
};

View file

@ -2197,6 +2197,8 @@ enum nl80211_attrs {
NL80211_ATTR_STA_SUPPORT_P2P_PS,
NL80211_ATTR_PAD,
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@ -3023,6 +3025,7 @@ enum nl80211_survey_info {
NL80211_SURVEY_INFO_TIME_RX,
NL80211_SURVEY_INFO_TIME_TX,
NL80211_SURVEY_INFO_TIME_SCAN,
NL80211_SURVEY_INFO_PAD,
/* keep last */
__NL80211_SURVEY_INFO_AFTER_LAST,
@ -3468,6 +3471,7 @@ enum nl80211_bss {
NL80211_BSS_BEACON_TSF,
NL80211_BSS_PRESP_DATA,
NL80211_BSS_LAST_SEEN_BOOTTIME,
NL80211_BSS_PAD,
/* keep last */
__NL80211_BSS_AFTER_LAST,

View file

@ -519,6 +519,7 @@ enum ovs_flow_attr {
* logging should be suppressed. */
OVS_FLOW_ATTR_UFID, /* Variable length unique flow identifier. */
OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */
OVS_FLOW_ATTR_PAD,
__OVS_FLOW_ATTR_MAX
};

View file

@ -179,6 +179,7 @@ enum {
TCA_TBF_PRATE64,
TCA_TBF_BURST,
TCA_TBF_PBURST,
TCA_TBF_PAD,
__TCA_TBF_MAX,
};
@ -368,6 +369,7 @@ enum {
TCA_HTB_DIRECT_QLEN,
TCA_HTB_RATE64,
TCA_HTB_CEIL64,
TCA_HTB_PAD,
__TCA_HTB_MAX,
};
@ -531,6 +533,7 @@ enum {
TCA_NETEM_RATE,
TCA_NETEM_ECN,
TCA_NETEM_RATE64,
TCA_NETEM_PAD,
__TCA_NETEM_MAX,
};

View file

@ -135,9 +135,9 @@ static inline size_t br_port_info_size(void)
+ nla_total_size(sizeof(u16)) /* IFLA_BRPORT_NO */
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_CONFIG_PENDING */
+ nla_total_size(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
+ nla_total_size(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
+ nla_total_size(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
@ -190,13 +190,16 @@ static int br_port_fill_attrs(struct sk_buff *skb,
return -EMSGSIZE;
timerval = br_timer_value(&p->message_age_timer);
if (nla_put_u64(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval))
if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE;
timerval = br_timer_value(&p->forward_delay_timer);
if (nla_put_u64(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval))
if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE;
timerval = br_timer_value(&p->hold_timer);
if (nla_put_u64(skb, IFLA_BRPORT_HOLD_TIMER, timerval))
if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
@ -1087,10 +1090,10 @@ static size_t br_get_size(const struct net_device *brdev)
nla_total_size(sizeof(u32)) + /* IFLA_BR_ROOT_PATH_COST */
nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE */
nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
nla_total_size(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
nla_total_size(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
nla_total_size(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
nla_total_size(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */
@ -1101,12 +1104,12 @@ static size_t br_get_size(const struct net_device *brdev)
nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
nla_total_size(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IPTABLES */
@ -1129,16 +1132,17 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
u64 clockval;
clockval = br_timer_value(&br->hello_timer);
if (nla_put_u64(skb, IFLA_BR_HELLO_TIMER, clockval))
if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
return -EMSGSIZE;
clockval = br_timer_value(&br->tcn_timer);
if (nla_put_u64(skb, IFLA_BR_TCN_TIMER, clockval))
if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
return -EMSGSIZE;
clockval = br_timer_value(&br->topology_change_timer);
if (nla_put_u64(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval))
if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = br_timer_value(&br->gc_timer);
if (nla_put_u64(skb, IFLA_BR_GC_TIMER, clockval))
if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
@ -1182,22 +1186,28 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval))
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_membership_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval))
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_querier_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval))
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_query_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval))
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval))
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
if (nla_put_u64(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval))
if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
IFLA_BR_PAD))
return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)

View file

@ -825,17 +825,17 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
nla_total_size(sizeof(struct ifla_vf_link_state)) +
nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
/* IFLA_VF_STATS_RX_PACKETS */
nla_total_size(sizeof(__u64)) +
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_TX_PACKETS */
nla_total_size(sizeof(__u64)) +
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_RX_BYTES */
nla_total_size(sizeof(__u64)) +
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_TX_BYTES */
nla_total_size(sizeof(__u64)) +
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_BROADCAST */
nla_total_size(sizeof(__u64)) +
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_MULTICAST */
nla_total_size(sizeof(__u64)) +
nla_total_size_64bit(sizeof(__u64)) +
nla_total_size(sizeof(struct ifla_vf_trust)));
return size;
} else
@ -1153,18 +1153,18 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
nla_nest_cancel(skb, vfinfo);
return -EMSGSIZE;
}
if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
vf_stats.rx_packets) ||
nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
vf_stats.tx_packets) ||
nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
vf_stats.rx_bytes) ||
nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
vf_stats.tx_bytes) ||
nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
vf_stats.broadcast) ||
nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
vf_stats.multicast))
if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
vf_stats.multicast, IFLA_VF_STATS_PAD))
return -EMSGSIZE;
nla_nest_end(skb, vfstats);
nla_nest_end(skb, vf);

View file

@ -34,9 +34,11 @@
#include "ieee802154.h"
static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr)
static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr,
int padattr)
{
return nla_put_u64(msg, type, swab64((__force u64)hwaddr));
return nla_put_u64_64bit(msg, type, swab64((__force u64)hwaddr),
padattr);
}
static __le64 nla_get_hwaddr(const struct nlattr *nla)
@ -623,7 +625,8 @@ ieee802154_llsec_fill_key_id(struct sk_buff *msg,
if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
desc->device_addr.extended_addr))
desc->device_addr.extended_addr,
IEEE802154_ATTR_PAD))
return -EMSGSIZE;
}
@ -638,7 +641,7 @@ ieee802154_llsec_fill_key_id(struct sk_buff *msg,
if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
desc->extended_source))
desc->extended_source, IEEE802154_ATTR_PAD))
return -EMSGSIZE;
return 0;
@ -1063,7 +1066,8 @@ ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq,
nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) ||
nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
desc->short_addr) ||
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) ||
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr,
IEEE802154_ATTR_PAD) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
desc->frame_counter) ||
nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
@ -1167,7 +1171,8 @@ ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq,
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) ||
nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr,
IEEE802154_ATTR_PAD) ||
nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
devkey->frame_counter) ||
ieee802154_llsec_fill_key_id(msg, &devkey->key_id))

View file

@ -813,7 +813,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
if (nla_put_u32(msg, NL802154_ATTR_WPAN_PHY, rdev->wpan_phy_idx) ||
nla_put_u32(msg, NL802154_ATTR_IFTYPE, wpan_dev->iftype) ||
nla_put_u64(msg, NL802154_ATTR_WPAN_DEV, wpan_dev_id(wpan_dev)) ||
nla_put_u64_64bit(msg, NL802154_ATTR_WPAN_DEV,
wpan_dev_id(wpan_dev), NL802154_ATTR_PAD) ||
nla_put_u32(msg, NL802154_ATTR_GENERATION,
rdev->devlist_generation ^
(cfg802154_rdev_list_generation << 2)))

View file

@ -109,7 +109,8 @@ static int ila_fill_encap_info(struct sk_buff *skb,
{
struct ila_params *p = ila_params_lwtunnel(lwtstate);
if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator))
if (nla_put_u64_64bit(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator,
ILA_ATTR_PAD))
goto nla_put_failure;
return 0;

View file

@ -418,12 +418,15 @@ static int ila_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
{
if (nla_put_u64(msg, ILA_ATTR_IDENTIFIER,
(__force u64)ila->p.identifier) ||
nla_put_u64(msg, ILA_ATTR_LOCATOR,
(__force u64)ila->p.ip.locator) ||
nla_put_u64(msg, ILA_ATTR_LOCATOR_MATCH,
(__force u64)ila->p.ip.locator_match) ||
if (nla_put_u64_64bit(msg, ILA_ATTR_IDENTIFIER,
(__force u64)ila->p.identifier,
ILA_ATTR_PAD) ||
nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
(__force u64)ila->p.ip.locator,
ILA_ATTR_PAD) ||
nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
(__force u64)ila->p.ip.locator_match,
ILA_ATTR_PAD) ||
nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->p.ifindex) ||
nla_put_u32(msg, ILA_ATTR_DIR, ila->p.dir))
return -1;

View file

@ -346,22 +346,30 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
if (nest == NULL)
goto nla_put_failure;
if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
atomic_long_read(&tunnel->stats.tx_packets)) ||
nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
atomic_long_read(&tunnel->stats.tx_bytes)) ||
nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
atomic_long_read(&tunnel->stats.tx_errors)) ||
nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
atomic_long_read(&tunnel->stats.rx_packets)) ||
nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
atomic_long_read(&tunnel->stats.rx_bytes)) ||
nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
atomic_long_read(&tunnel->stats.rx_errors)))
if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
atomic_long_read(&tunnel->stats.tx_packets),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
atomic_long_read(&tunnel->stats.tx_bytes),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
atomic_long_read(&tunnel->stats.tx_errors),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
atomic_long_read(&tunnel->stats.rx_packets),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
atomic_long_read(&tunnel->stats.rx_bytes),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
atomic_long_read(&tunnel->stats.rx_seq_discards),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
atomic_long_read(&tunnel->stats.rx_oos_packets),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
atomic_long_read(&tunnel->stats.rx_errors),
L2TP_ATTR_STATS_PAD))
goto nla_put_failure;
nla_nest_end(skb, nest);
@ -754,22 +762,30 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
if (nest == NULL)
goto nla_put_failure;
if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
atomic_long_read(&session->stats.tx_packets)) ||
nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
atomic_long_read(&session->stats.tx_bytes)) ||
nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
atomic_long_read(&session->stats.tx_errors)) ||
nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
atomic_long_read(&session->stats.rx_packets)) ||
nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
atomic_long_read(&session->stats.rx_bytes)) ||
nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
atomic_long_read(&session->stats.rx_seq_discards)) ||
nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
atomic_long_read(&session->stats.rx_oos_packets)) ||
nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
atomic_long_read(&session->stats.rx_errors)))
if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
atomic_long_read(&session->stats.tx_packets),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_TX_BYTES,
atomic_long_read(&session->stats.tx_bytes),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_TX_ERRORS,
atomic_long_read(&session->stats.tx_errors),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_PACKETS,
atomic_long_read(&session->stats.rx_packets),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_BYTES,
atomic_long_read(&session->stats.rx_bytes),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
atomic_long_read(&session->stats.rx_seq_discards),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
atomic_long_read(&session->stats.rx_oos_packets),
L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_ERRORS,
atomic_long_read(&session->stats.rx_errors),
L2TP_ATTR_STATS_PAD))
goto nla_put_failure;
nla_nest_end(skb, nest);

View file

@ -2875,8 +2875,10 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, (u32)kstats->conns) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, (u32)kstats->inpkts) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, (u32)kstats->outpkts) ||
nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
IPVS_STATS_ATTR_PAD) ||
nla_put_u32(skb, IPVS_STATS_ATTR_CPS, (u32)kstats->cps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, (u32)kstats->inpps) ||
nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, (u32)kstats->outpps) ||
@ -2900,16 +2902,26 @@ static int ip_vs_genl_fill_stats64(struct sk_buff *skb, int container_type,
if (!nl_stats)
return -EMSGSIZE;
if (nla_put_u64(skb, IPVS_STATS_ATTR_CONNS, kstats->conns) ||
nla_put_u64(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts) ||
nla_put_u64(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts) ||
nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes) ||
nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes) ||
nla_put_u64(skb, IPVS_STATS_ATTR_CPS, kstats->cps) ||
nla_put_u64(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps) ||
nla_put_u64(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps) ||
nla_put_u64(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps) ||
nla_put_u64(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps))
if (nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CONNS, kstats->conns,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPKTS, kstats->inpkts,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPKTS, kstats->outpkts,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBYTES, kstats->inbytes,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBYTES, kstats->outbytes,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_CPS, kstats->cps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INPPS, kstats->inpps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTPPS, kstats->outpps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_INBPS, kstats->inbps,
IPVS_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, IPVS_STATS_ATTR_OUTBPS, kstats->outbps,
IPVS_STATS_ATTR_PAD))
goto nla_put_failure;
nla_nest_end(skb, nl_stats);

View file

@ -754,7 +754,8 @@ static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
if (used &&
nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
OVS_FLOW_ATTR_PAD))
return -EMSGSIZE;
if (stats.n_packets &&

View file

@ -1122,10 +1122,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps))
nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
TCA_HTB_PAD))
goto nla_put_failure;
if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps))
nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
TCA_HTB_PAD))
goto nla_put_failure;
return nla_nest_end(skb, nest);

View file

@ -994,7 +994,8 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
goto nla_put_failure;
if (q->rate >= (1ULL << 32)) {
if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate))
if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
TCA_NETEM_PAD))
goto nla_put_failure;
rate.rate = ~0U;
} else {

View file

@ -472,11 +472,13 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
TCA_TBF_PAD))
goto nla_put_failure;
if (tbf_peak_present(q) &&
q->peak.rate_bytes_ps >= (1ULL << 32) &&
nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
TCA_TBF_PAD))
goto nla_put_failure;
return nla_nest_end(skb, nest);

View file

@ -2429,7 +2429,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD) ||
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, wdev_address(wdev)) ||
nla_put_u32(msg, NL80211_ATTR_GENERATION,
rdev->devlist_generation ^
@ -6874,7 +6875,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
if (wdev->netdev &&
nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex))
goto nla_put_failure;
if (nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
if (nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD))
goto nla_put_failure;
bss = nla_nest_start(msg, NL80211_ATTR_BSS);
@ -6895,7 +6897,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
*/
ies = rcu_dereference(res->ies);
if (ies) {
if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
if (nla_put_u64_64bit(msg, NL80211_BSS_TSF, ies->tsf,
NL80211_BSS_PAD))
goto fail_unlock_rcu;
if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
ies->len, ies->data))
@ -6905,7 +6908,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
/* and this pointer is always (unless driver didn't know) beacon data */
ies = rcu_dereference(res->beacon_ies);
if (ies && ies->from_beacon) {
if (nla_put_u64(msg, NL80211_BSS_BEACON_TSF, ies->tsf))
if (nla_put_u64_64bit(msg, NL80211_BSS_BEACON_TSF, ies->tsf,
NL80211_BSS_PAD))
goto fail_unlock_rcu;
if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES,
ies->len, ies->data))
@ -6924,8 +6928,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
goto nla_put_failure;
if (intbss->ts_boottime &&
nla_put_u64(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
intbss->ts_boottime))
nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
intbss->ts_boottime, NL80211_BSS_PAD))
goto nla_put_failure;
switch (rdev->wiphy.signal_type) {
@ -7045,28 +7049,28 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq,
nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE))
goto nla_put_failure;
if ((survey->filled & SURVEY_INFO_TIME) &&
nla_put_u64(msg, NL80211_SURVEY_INFO_TIME,
survey->time))
nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME,
survey->time, NL80211_SURVEY_INFO_PAD))
goto nla_put_failure;
if ((survey->filled & SURVEY_INFO_TIME_BUSY) &&
nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_BUSY,
survey->time_busy))
nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_BUSY,
survey->time_busy, NL80211_SURVEY_INFO_PAD))
goto nla_put_failure;
if ((survey->filled & SURVEY_INFO_TIME_EXT_BUSY) &&
nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY,
survey->time_ext_busy))
nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_EXT_BUSY,
survey->time_ext_busy, NL80211_SURVEY_INFO_PAD))
goto nla_put_failure;
if ((survey->filled & SURVEY_INFO_TIME_RX) &&
nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_RX,
survey->time_rx))
nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_RX,
survey->time_rx, NL80211_SURVEY_INFO_PAD))
goto nla_put_failure;
if ((survey->filled & SURVEY_INFO_TIME_TX) &&
nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_TX,
survey->time_tx))
nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_TX,
survey->time_tx, NL80211_SURVEY_INFO_PAD))
goto nla_put_failure;
if ((survey->filled & SURVEY_INFO_TIME_SCAN) &&
nla_put_u64(msg, NL80211_SURVEY_INFO_TIME_SCAN,
survey->time_scan))
nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_SCAN,
survey->time_scan, NL80211_SURVEY_INFO_PAD))
goto nla_put_failure;
nla_nest_end(msg, infoattr);
@ -7786,8 +7790,8 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
}
if (wdev) {
if (nla_put_u64(skb, NL80211_ATTR_WDEV,
wdev_id(wdev)))
if (nla_put_u64_64bit(skb, NL80211_ATTR_WDEV,
wdev_id(wdev), NL80211_ATTR_PAD))
goto nla_put_failure;
if (wdev->netdev &&
nla_put_u32(skb, NL80211_ATTR_IFINDEX,
@ -8380,7 +8384,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
if (err)
goto free_msg;
if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
NL80211_ATTR_PAD))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@ -8792,7 +8797,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
goto free_msg;
if (msg) {
if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
NL80211_ATTR_PAD))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@ -10078,7 +10084,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
if (err)
goto free_msg;
if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
NL80211_ATTR_PAD))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@ -10503,8 +10510,9 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
break;
if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
(wdev && nla_put_u64(skb, NL80211_ATTR_WDEV,
wdev_id(wdev)))) {
(wdev && nla_put_u64_64bit(skb, NL80211_ATTR_WDEV,
wdev_id(wdev),
NL80211_ATTR_PAD))) {
genlmsg_cancel(skb, hdr);
break;
}
@ -11711,7 +11719,8 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
(wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
wdev->netdev->ifindex)) ||
nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD))
goto nla_put_failure;
/* ignore errors and send incomplete event anyway */
@ -12378,11 +12387,13 @@ static void nl80211_send_remain_on_chan_event(
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
(wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
wdev->netdev->ifindex)) ||
nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
NL80211_CHAN_NO_HT) ||
nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
NL80211_ATTR_PAD))
goto nla_put_failure;
if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL &&
@ -12616,7 +12627,8 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
(netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
netdev->ifindex)) ||
nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
(sig_dbm &&
nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
@ -12659,9 +12671,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
(netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
netdev->ifindex)) ||
nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD) ||
nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
NL80211_ATTR_PAD) ||
(ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
goto nla_put_failure;
@ -13041,7 +13055,8 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev = netdev->ieee80211_ptr;
if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD))
goto nla_put_failure;
}
@ -13086,7 +13101,8 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, cookie,
NL80211_ATTR_PAD) ||
(acked && nla_put_flag(msg, NL80211_ATTR_ACK)))
goto nla_put_failure;
@ -13231,7 +13247,8 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
goto free_msg;
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD))
goto free_msg;
if (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
@ -13506,7 +13523,8 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
goto nla_put_failure;
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@ -13539,7 +13557,8 @@ void nl80211_send_ap_stopped(struct wireless_dev *wdev)
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) ||
nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD))
goto out;
genlmsg_end(msg, hdr);