net: change netdev->features to u32
Quoting Ben Hutchings: we presumably won't be defining features that
can only be enabled on 64-bit architectures.

Occurrences found by `grep -r` on net/, drivers/net, include/

[ Move features and vlan_features next to each other in struct netdev,
  as per Eric Dumazet's suggestion -DaveM ]

Signed-off-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
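For context, the NETIF_F_* feature flags are plain bit masks, so a fixed-width 32-bit type is wide enough as long as no flag is defined above bit 31. A minimal standalone sketch of the idea (user-space C, not kernel code; the three flag values mirror the kernel definitions of that era but are redefined here purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative subset of the NETIF_F_* bit flags. */
	#define NETIF_F_SG       (1u << 0)   /* scatter/gather IO        */
	#define NETIF_F_IP_CSUM  (1u << 1)   /* IPv4 TCP/UDP checksum    */
	#define NETIF_F_TSO      (1u << 16)  /* TCP segmentation offload */

	int main(void)
	{
		/* u32 in the kernel, uint32_t here: same width on 32- and
		 * 64-bit hosts, unlike the old "unsigned long". */
		uint32_t features = NETIF_F_SG | NETIF_F_IP_CSUM;

		features |= NETIF_F_TSO;        /* turn a feature on  */
		features &= ~NETIF_F_IP_CSUM;   /* turn a feature off */

		printf("TSO is %s, features = %#x\n",
		       (features & NETIF_F_TSO) ? "on" : "off", features);
		return 0;
	}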
Parent: 57422dc530
Commit: 04ed3e741d
@@ -8312,7 +8312,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
 #endif
 };
 
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static void inline vlan_features_add(struct net_device *dev, u32 flags)
 {
 	dev->vlan_features |= flags;
 }
@@ -1372,8 +1372,8 @@ static int bond_compute_features(struct bonding *bond)
 {
 	struct slave *slave;
 	struct net_device *bond_dev = bond->dev;
-	unsigned long features = bond_dev->features;
-	unsigned long vlan_features = 0;
+	u32 features = bond_dev->features;
+	u32 vlan_features = 0;
 	unsigned short max_hard_header_len = max((u16)ETH_HLEN,
 						 bond_dev->hard_header_len);
 	int i;
@@ -253,7 +253,7 @@ struct myri10ge_priv {
 	unsigned long serial_number;
 	int vendor_specific_offset;
 	int fw_multicast_support;
-	unsigned long features;
+	u32 features;
 	u32 max_tso6;
 	u32 read_dma;
 	u32 write_dma;
@@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
 static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
 {
 	struct myri10ge_priv *mgp = netdev_priv(netdev);
-	unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
+	u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
 
 	if (tso_enabled)
 		netdev->features |= flags;
@@ -502,7 +502,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
 static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
 {
 	struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
-	unsigned long features;
+	u32 features;
 
 	features = NETIF_F_TSO;
 	if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +519,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
+	u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
 
 	if (enable)
 		net_dev->features |= features;
@@ -906,7 +906,7 @@ struct efx_nic_type {
 	unsigned int phys_addr_channels;
 	unsigned int tx_dc_base;
 	unsigned int rx_dc_base;
-	unsigned long offload_features;
+	u32 offload_features;
 	u32 reset_world_flags;
 };
 
@@ -1142,7 +1142,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
  * privs required. */
 static int set_offload(struct net_device *dev, unsigned long arg)
 {
-	unsigned int old_features, features;
+	u32 old_features, features;
 
 	old_features = dev->features;
 	/* Unset features, set them as we chew on the arg. */
@@ -914,7 +914,11 @@ struct net_device {
 	struct list_head unreg_list;
 
 	/* Net device features */
-	unsigned long features;
+	u32 features;
+
+	/* VLAN feature mask */
+	u32 vlan_features;
+
 #define NETIF_F_SG 1 /* Scatter/gather IO. */
 #define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
 #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
@@ -1176,9 +1180,6 @@ struct net_device {
 	/* rtnetlink link ops */
 	const struct rtnl_link_ops *rtnl_link_ops;
 
-	/* VLAN feature mask */
-	unsigned long vlan_features;
-
 	/* for setting kernel sock attribute on TCP connection setup */
 #define GSO_MAX_SIZE 65536
 	unsigned int gso_max_size;
@@ -1401,7 +1402,7 @@ struct packet_type {
 					 struct packet_type *,
 					 struct net_device *);
 	struct sk_buff *(*gso_segment)(struct sk_buff *skb,
-					int features);
+					u32 features);
 	int (*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff **(*gro_receive)(struct sk_buff **head,
 					struct sk_buff *skb);
@@ -2370,7 +2371,7 @@ extern int netdev_tstamp_prequeue;
 extern int weight_p;
 extern int netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -2397,22 +2398,21 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int l
 
 extern void linkwatch_run_queue(void);
 
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
-					unsigned long mask);
-unsigned long netdev_fix_features(unsigned long features, const char *name);
+u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+u32 netdev_fix_features(u32 features, const char *name);
 
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
-int netif_skb_features(struct sk_buff *skb);
+u32 netif_skb_features(struct sk_buff *skb);
 
-static inline int net_gso_ok(int features, int gso_type)
+static inline int net_gso_ok(u32 features, int gso_type)
 {
 	int feature = gso_type << NETIF_F_GSO_SHIFT;
 	return (features & feature) == feature;
 }
 
-static inline int skb_gso_ok(struct sk_buff *skb, int features)
+static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
 {
 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
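The net_gso_ok()/skb_gso_ok() helpers in the hunk above only perform bit-mask tests, which is why narrowing their features argument to u32 is safe. A standalone sketch of the same check (assumed values: NETIF_F_GSO_SHIFT = 16 and SKB_GSO_TCPV4 = bit 0, matching the kernel headers of this period; the helper name gso_ok is hypothetical):

	#include <assert.h>
	#include <stdint.h>

	#define NETIF_F_GSO_SHIFT 16          /* GSO feature bits start here */
	#define SKB_GSO_TCPV4     (1 << 0)    /* TCPv4 segmentation type     */
	#define NETIF_F_TSO       (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)

	/* Mirrors net_gso_ok(): every bit of the GSO type must be advertised. */
	static int gso_ok(uint32_t features, int gso_type)
	{
		uint32_t feature = (uint32_t)gso_type << NETIF_F_GSO_SHIFT;
		return (features & feature) == feature;
	}

	int main(void)
	{
		assert(gso_ok(NETIF_F_TSO, SKB_GSO_TCPV4));   /* device offers TSO     */
		assert(!gso_ok(0, SKB_GSO_TCPV4));            /* device offers nothing */
		return 0;
	}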
@@ -1877,7 +1877,7 @@ extern void skb_split(struct sk_buff *skb,
 extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
 		     int shiftlen);
 
-extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 				       int len, void *buffer)
@@ -38,7 +38,7 @@ struct net_protocol {
 	void (*err_handler)(struct sk_buff *skb, u32 info);
 	int (*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff *(*gso_segment)(struct sk_buff *skb,
-				       int features);
+				       u32 features);
 	struct sk_buff **(*gro_receive)(struct sk_buff **head,
 					struct sk_buff *skb);
 	int (*gro_complete)(struct sk_buff *skb);
@@ -57,7 +57,7 @@ struct inet6_protocol {
 
 	int (*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff *(*gso_segment)(struct sk_buff *skb,
-				       int features);
+				       u32 features);
 	struct sk_buff **(*gro_receive)(struct sk_buff **head,
 					struct sk_buff *skb);
 	int (*gro_complete)(struct sk_buff *skb);
@@ -1404,7 +1404,7 @@ extern struct request_sock_ops tcp6_request_sock_ops;
 extern void tcp_v4_destroy_sock(struct sock *sk);
 
 extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
 extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
 					struct sk_buff *skb);
 extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
@@ -245,5 +245,5 @@ extern void udp4_proc_exit(void);
 extern void udp_init(void);
 
 extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
 #endif /* _UDP_H */
@@ -327,7 +327,7 @@ static void vlan_sync_address(struct net_device *dev,
 static void vlan_transfer_features(struct net_device *dev,
 				   struct net_device *vlandev)
 {
-	unsigned long old_features = vlandev->features;
+	u32 old_features = vlandev->features;
 
 	vlandev->features &= ~dev->vlan_features;
 	vlandev->features |= dev->features & dev->vlan_features;
@@ -365,7 +365,7 @@ int br_min_mtu(const struct net_bridge *br)
 void br_features_recompute(struct net_bridge *br)
 {
 	struct net_bridge_port *p;
-	unsigned long features, mask;
+	u32 features, mask;
 
 	features = mask = br->feature_mask;
 	if (list_empty(&br->port_list))
@@ -182,7 +182,7 @@ struct net_bridge
 	struct br_cpu_netstats __percpu *stats;
 	spinlock_t hash_lock;
 	struct hlist_head hash[BR_HASH_SIZE];
-	unsigned long feature_mask;
+	u32 feature_mask;
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct rtable fake_rtable;
 	bool nf_call_iptables;
@@ -1858,7 +1858,7 @@ EXPORT_SYMBOL(skb_checksum_help);
  *	It may return NULL if the skb requires no segmentation. This is
  *	only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_type *ptype;
@@ -2046,7 +2046,7 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 		     protocol == htons(ETH_P_FCOE)));
 }
 
-static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
 {
 	if (!can_checksum_protocol(features, protocol)) {
 		features &= ~NETIF_F_ALL_CSUM;
@@ -2058,10 +2058,10 @@ static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features
 	return features;
 }
 
-int netif_skb_features(struct sk_buff *skb)
+u32 netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
-	int features = skb->dev->features;
+	u32 features = skb->dev->features;
 
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2106,7 +2106,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
-		int features;
+		u32 features;
 
 		/*
 		 * If device doesnt need skb->dst, release it right now while
@@ -5213,7 +5213,7 @@ static void rollback_registered(struct net_device *dev)
 	rollback_registered_many(&single);
 }
 
-unsigned long netdev_fix_features(unsigned long features, const char *name)
+u32 netdev_fix_features(u32 features, const char *name)
 {
 	/* Fix illegal checksum combinations */
 	if ((features & NETIF_F_HW_CSUM) &&
@@ -6143,8 +6143,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  *	@one to the master device with current feature set @all. Will not
  *	enable anything that is off in @mask. Returns the new feature set.
  */
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
-					unsigned long mask)
+u32 netdev_increment_features(u32 all, u32 one, u32 mask)
 {
 	/* If device needs checksumming, downgrade to it. */
 	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
@@ -1458,7 +1458,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	void __user *useraddr = ifr->ifr_data;
 	u32 ethcmd;
 	int rc;
-	unsigned long old_features;
+	u32 old_features;
 
 	if (!dev || !netif_device_present(dev))
 		return -ENODEV;
@@ -99,7 +99,7 @@ NETDEVICE_SHOW(addr_assign_type, fmt_dec);
 NETDEVICE_SHOW(addr_len, fmt_dec);
 NETDEVICE_SHOW(iflink, fmt_dec);
 NETDEVICE_SHOW(ifindex, fmt_dec);
-NETDEVICE_SHOW(features, fmt_long_hex);
+NETDEVICE_SHOW(features, fmt_hex);
 NETDEVICE_SHOW(type, fmt_dec);
 NETDEVICE_SHOW(link_mode, fmt_dec);
 
@@ -2497,7 +2497,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
  *	a pointer to the first in a list of new skbs for the segments.
  *	In case of error it returns ERR_PTR(err).
  */
-struct sk_buff *skb_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = NULL;
 	struct sk_buff *tail = NULL;
@@ -2507,7 +2507,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
 	unsigned int offset = doffset;
 	unsigned int headroom;
 	unsigned int len;
-	int sg = features & NETIF_F_SG;
+	int sg = !!(features & NETIF_F_SG);
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	int err = -ENOMEM;
 	int i = 0;
@@ -1215,7 +1215,7 @@ out:
 	return err;
 }
 
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct iphdr *iph;
@@ -2653,7 +2653,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct tcphdr *th;
@@ -2199,7 +2199,7 @@ int udp4_ufo_send_check(struct sk_buff *skb)
 	return 0;
 }
 
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	unsigned int mss;
@@ -772,7 +772,7 @@ out:
 	return err;
 }
 
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct ipv6hdr *ipv6h;
@@ -1299,7 +1299,7 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
 	return 0;
 }
 
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	unsigned int mss;