skb->vlan_present seems redundant.

We can instead derive it from this boolean expression:

vlan_present = skb->vlan_proto != 0 || skb->vlan_tci != 0

Add a new union so that both fields can be read or written
with a single load/store when possible.

	union {
		u32	vlan_all;
		struct {
			__be16	vlan_proto;
			__u16	vlan_tci;
		};
	};
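
As a quick illustration (a standalone userspace sketch, not kernel code; the struct name and values below are made up), any non-zero byte in either 16-bit field makes the combined 32-bit word non-zero, and a single 32-bit store clears both fields at once:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Userspace model of the new vlan union (illustrative only; relies on
	 * union member overlap / type punning, as the kernel code does). */
	struct vlan_words {
		union {
			uint32_t vlan_all;
			struct {
				uint16_t vlan_proto;	/* __be16 in the kernel */
				uint16_t vlan_tci;
			};
		};
	};

	int main(void)
	{
		struct vlan_words v = { .vlan_all = 0 };

		/* Writing either field makes vlan_all non-zero, so "tag present"
		 * can be derived instead of being stored in a separate bit. */
		v.vlan_proto = 0x8100;	/* arbitrary non-zero example value */
		v.vlan_tci = 5;
		assert(v.vlan_all != 0);

		/* One 32-bit store clears both fields. */
		v.vlan_all = 0;
		assert(v.vlan_proto == 0 && v.vlan_tci == 0);

		printf("vlan_all model OK\n");
		return 0;
	}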

This allows the following patch to remove a conditional test in the GRO stack.

Note:
  We move remcsum_offload to keep TC_AT_INGRESS_MASK
  and SKB_MONO_DELIVERY_TIME_MASK unchanged.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Eric Dumazet, 2022-11-09 09:57:58 +00:00 (committed by Jakub Kicinski)
Parent: 2cf7e87fc4
Commit: 354259fa73
6 changed files: 29 additions and 33 deletions


@@ -555,11 +555,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
 			emit_skb_load16(vlan_tci, r_A);
 			break;
 		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
-			__emit_skb_load8(__pkt_vlan_present_offset, r_A);
-			if (PKT_VLAN_PRESENT_BIT)
-				emit_alu_K(SRL, PKT_VLAN_PRESENT_BIT);
-			if (PKT_VLAN_PRESENT_BIT < 7)
-				emit_andi(r_A, 1, r_A);
+			emit_skb_load32(vlan_all, r_A);
+			emit_cmpi(r_A, 0);
+			emit_branch_off(BE, 12);
+			emit_nop();
+			emit_loadimm(1, r_A);
 			break;
 		case BPF_LD | BPF_W | BPF_LEN:
 			emit_skb_load32(len, r_A);
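
For readers unfamiliar with the SPARC JIT helpers: the new sequence loads vlan_all, compares it with zero and, when it is zero, branches over the load of the constant 1 (the nop fills the branch delay slot). A rough C sketch of the value left in r_A (semantics only, not the JIT output; the helper name is made up):

	#include <stdint.h>

	/* r_A becomes 1 when vlan_all is non-zero and stays 0 otherwise. */
	uint32_t vlan_tag_present_value(uint32_t vlan_all)
	{
		uint32_t r_a = vlan_all;	/* emit_skb_load32(vlan_all, r_A) */

		if (r_a != 0)			/* emit_cmpi() + emit_branch_off(BE, ...) */
			r_a = 1;		/* emit_loadimm(1, r_A) */

		return r_a;
	}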


@@ -1973,7 +1973,7 @@ static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
 #endif
 
 #ifdef CONFIG_DCB
-	if (!skb->vlan_present)
+	if (!skb_vlan_tag_present(skb))
 		goto pick_tx;
 
 	vlan_prio = skb->vlan_tci >> 13;


@@ -76,7 +76,7 @@ static inline bool is_vlan_dev(const struct net_device *dev)
 	return dev->priv_flags & IFF_802_1Q_VLAN;
 }
 
-#define skb_vlan_tag_present(__skb)	((__skb)->vlan_present)
+#define skb_vlan_tag_present(__skb)	(!!(__skb)->vlan_all)
 #define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
 #define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
 #define skb_vlan_tag_get_cfi(__skb)	(!!((__skb)->vlan_tci & VLAN_CFI_MASK))
@@ -471,7 +471,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
  */
 static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
 {
-	skb->vlan_present = 0;
+	skb->vlan_all = 0;
 }
 
 /**
@@ -483,9 +483,7 @@ static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
  */
 static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
 {
-	dst->vlan_present = src->vlan_present;
-	dst->vlan_proto = src->vlan_proto;
-	dst->vlan_tci = src->vlan_tci;
+	dst->vlan_all = src->vlan_all;
 }
 
 /*
@@ -519,7 +517,6 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
 {
 	skb->vlan_proto = vlan_proto;
 	skb->vlan_tci = vlan_tci;
-	skb->vlan_present = 1;
 }
 
 /**


@@ -818,7 +818,7 @@ typedef unsigned char *sk_buff_data_t;
  *	@mark: Generic packet mark
  *	@reserved_tailroom: (aka @mark) number of bytes of free space available
  *		at the tail of an sk_buff
- *	@vlan_present: VLAN tag is present
+ *	@vlan_all: vlan fields (proto & tci)
  *	@vlan_proto: vlan encapsulation protocol
  *	@vlan_tci: vlan tag control information
  *	@inner_protocol: Protocol (encapsulation)
@@ -951,7 +951,7 @@ struct sk_buff {
 	/* private: */
 	__u8			__pkt_vlan_present_offset[0];
 	/* public: */
-	__u8			vlan_present:1;	/* See PKT_VLAN_PRESENT_BIT */
+	__u8			remcsum_offload:1;
 	__u8			csum_complete_sw:1;
 	__u8			csum_level:2;
 	__u8			dst_pending_confirm:1;
@@ -966,7 +966,6 @@ struct sk_buff {
 	__u8			ipvs_property:1;
 
 	__u8			inner_protocol_type:1;
-	__u8			remcsum_offload:1;
 #ifdef CONFIG_NET_SWITCHDEV
 	__u8			offload_fwd_mark:1;
 	__u8			offload_l3_fwd_mark:1;
@@ -999,8 +998,13 @@ struct sk_buff {
 	__u32			priority;
 	int			skb_iif;
 	__u32			hash;
-	__be16			vlan_proto;
-	__u16			vlan_tci;
+	union {
+		u32		vlan_all;
+		struct {
+			__be16	vlan_proto;
+			__u16	vlan_tci;
+		};
+	};
 #if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
 	union {
 		unsigned int	napi_id;
@@ -1059,15 +1063,13 @@ struct sk_buff {
 #endif
 #define PKT_TYPE_OFFSET		offsetof(struct sk_buff, __pkt_type_offset)
 
-/* if you move pkt_vlan_present, tc_at_ingress, or mono_delivery_time
+/* if you move tc_at_ingress or mono_delivery_time
  * around, you also must adapt these constants.
  */
 #ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_VLAN_PRESENT_BIT	7
 #define TC_AT_INGRESS_MASK		(1 << 0)
 #define SKB_MONO_DELIVERY_TIME_MASK	(1 << 2)
 #else
-#define PKT_VLAN_PRESENT_BIT	0
 #define TC_AT_INGRESS_MASK		(1 << 7)
 #define SKB_MONO_DELIVERY_TIME_MASK	(1 << 5)
 #endif
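
The anonymous union overlays the 32-bit vlan_all on the existing pair of 16-bit fields without growing the struct or moving the fields relative to each other, which is what lets skb_vlan_tag_present() and __vlan_hwaccel_copy_tag() operate on a single word. A standalone sketch of that layout property (a model struct, not the real sk_buff):

	#include <stddef.h>
	#include <stdint.h>

	/* Model of the union added to struct sk_buff (illustrative only). */
	struct vlan_fields {
		union {
			uint32_t vlan_all;
			struct {
				uint16_t vlan_proto;
				uint16_t vlan_tci;
			};
		};
	};

	/* vlan_all aliases exactly the four bytes of vlan_proto + vlan_tci. */
	_Static_assert(sizeof(struct vlan_fields) == sizeof(uint32_t),
		       "the union adds no padding");
	_Static_assert(offsetof(struct vlan_fields, vlan_proto) == 0 &&
		       offsetof(struct vlan_fields, vlan_tci) == sizeof(uint16_t),
		       "the 16-bit fields keep their relative placement");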


@@ -14346,7 +14346,6 @@ static struct sk_buff *populate_skb(char *buf, int size)
 	skb->hash = SKB_HASH;
 	skb->queue_mapping = SKB_QUEUE_MAP;
 	skb->vlan_tci = SKB_VLAN_TCI;
-	skb->vlan_present = SKB_VLAN_PRESENT;
 	skb->vlan_proto = htons(ETH_P_IP);
 	dev_net_set(&dev, &init_net);
 	skb->dev = &dev;


@@ -325,11 +325,11 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
 				      offsetof(struct sk_buff, vlan_tci));
 		break;
 	case SKF_AD_VLAN_TAG_PRESENT:
-		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET);
-		if (PKT_VLAN_PRESENT_BIT)
-			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
-		if (PKT_VLAN_PRESENT_BIT < 7)
-			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
+		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_all) != 4);
+		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+				      offsetof(struct sk_buff, vlan_all));
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
+		*insn++ = BPF_ALU32_IMM(BPF_MOV, dst_reg, 1);
 		break;
 	}
 
@@ -9290,13 +9290,11 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 		break;
 
 	case offsetof(struct __sk_buff, vlan_present):
-		*target_size = 1;
-		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
-				      PKT_VLAN_PRESENT_OFFSET);
-		if (PKT_VLAN_PRESENT_BIT)
-			*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
-		if (PKT_VLAN_PRESENT_BIT < 7)
-			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+				      bpf_target_off(struct sk_buff,
+						     vlan_all, 4, target_size));
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+		*insn++ = BPF_ALU32_IMM(BPF_MOV, si->dst_reg, 1);
 		break;
 
 	case offsetof(struct __sk_buff, vlan_tci):
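
Both rewritten BPF conversions now emit the same three-instruction pattern: a 32-bit load of vlan_all, a conditional jump over one instruction when the loaded value is zero, and a move of the constant 1, so the destination register ends up holding !!vlan_all and no shift/mask by PKT_VLAN_PRESENT_BIT is needed. A C sketch of the computed value (semantics only; the function name is made up):

	#include <stdint.h>

	/* Value left in dst_reg by the emitted eBPF sequence (illustrative). */
	uint32_t bpf_vlan_present_result(uint32_t vlan_all)
	{
		uint32_t dst = vlan_all;	/* BPF_LDX_MEM(BPF_W, ..., vlan_all) */

		if (dst != 0)			/* BPF_JMP_IMM(BPF_JEQ, dst, 0, 1) skips one insn */
			dst = 1;		/* BPF_ALU32_IMM(BPF_MOV, dst, 1) */

		return dst;
	}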