Add tunnel hdr_size and proto fields to the nf_flowtable_ctx struct in order to store the IP tunnel header size and protocol used during IPIP and IP6IP6 tunnel sw offloading decapsulation. This avoids recomputing them during the tunnel header pop (for IPv6 the tunnel header size is constant).
Signed-off-by: Lorenzo Bianconi <[email protected]>
---
 net/netfilter/nf_flow_table_ip.c | 41 +++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 8d3fbeaca2df110180414d44b28475adce8724ae..7d86653478c39cce2e321f3df73dbfde6f7c3e33 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -142,6 +142,18 @@ static bool ip_has_options(unsigned int thoff)
 	return thoff != sizeof(struct iphdr);
 }
 
+struct nf_flowtable_ctx {
+	const struct net_device	*in;
+	u32			offset;
+	u32			hdrsize;
+	struct {
+		/* Tunnel IP header size */
+		u32		hdr_size;
+		/* IP tunnel protocol */
+		u8		proto;
+	} tun;
+};
+
 static void nf_flow_tuple_encap(struct sk_buff *skb,
 				struct flow_offload_tuple *tuple)
 {
@@ -184,12 +196,6 @@ static void nf_flow_tuple_encap(struct sk_buff *skb,
 	}
 }
 
-struct nf_flowtable_ctx {
-	const struct net_device	*in;
-	u32			offset;
-	u32			hdrsize;
-};
-
 static int nf_flow_tuple_ip(struct nf_flowtable_ctx *ctx, struct sk_buff *skb,
 			    struct flow_offload_tuple *tuple)
 {
@@ -311,20 +317,22 @@ static bool nf_flow_ip4_tunnel_proto(struct nf_flowtable_ctx *ctx,
 	if (iph->ttl <= 1)
 		return false;
 
-	if (iph->protocol == IPPROTO_IPIP)
+	if (iph->protocol == IPPROTO_IPIP) {
+		ctx->tun.proto = IPPROTO_IPIP;
+		ctx->tun.hdr_size = size;
 		ctx->offset += size;
+	}
 
 	return true;
 }
 
-static void nf_flow_ip4_tunnel_pop(struct sk_buff *skb)
+static void nf_flow_ip4_tunnel_pop(struct nf_flowtable_ctx *ctx,
+				   struct sk_buff *skb)
 {
-	struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
-
-	if (iph->protocol != IPPROTO_IPIP)
+	if (ctx->tun.proto != IPPROTO_IPIP)
 		return;
 
-	skb_pull(skb, iph->ihl << 2);
+	skb_pull(skb, ctx->tun.hdr_size);
 	skb_reset_network_header(skb);
 }
 
@@ -362,7 +370,8 @@ static bool nf_flow_skb_encap_protocol(struct nf_flowtable_ctx *ctx,
 	return ret;
 }
 
-static void nf_flow_encap_pop(struct sk_buff *skb,
+static void nf_flow_encap_pop(struct nf_flowtable_ctx *ctx,
+			      struct sk_buff *skb,
 			      struct flow_offload_tuple_rhash *tuplehash)
 {
 	struct vlan_hdr *vlan_hdr;
@@ -389,7 +398,7 @@ static void nf_flow_encap_pop(struct sk_buff *skb,
 	}
 
 	if (skb->protocol == htons(ETH_P_IP))
-		nf_flow_ip4_tunnel_pop(skb);
+		nf_flow_ip4_tunnel_pop(ctx, skb);
 }
 
 struct nf_flow_xmit {
@@ -459,7 +468,7 @@ static int nf_flow_offload_forward(struct nf_flowtable_ctx *ctx,
 
 	flow_offload_refresh(flow_table, flow, false);
 
-	nf_flow_encap_pop(skb, tuplehash);
+	nf_flow_encap_pop(ctx, skb, tuplehash);
 	thoff -= ctx->offset;
 
 	iph = ip_hdr(skb);
@@ -874,7 +883,7 @@ static int nf_flow_offload_ipv6_forward(struct nf_flowtable_ctx *ctx,
 
 	flow_offload_refresh(flow_table, flow, false);
 
-	nf_flow_encap_pop(skb, tuplehash);
+	nf_flow_encap_pop(ctx, skb, tuplehash);
 
 	ip6h = ipv6_hdr(skb);
 	nf_flow_nat_ipv6(flow, skb, dir, ip6h);
-- 
2.52.0
