On Fri, May 18, 2018 at 5:49 PM, Greg Rose <gvrose8...@gmail.com> wrote:
> From: William Tu <u9012...@gmail.com>
>
> Upstream commit:
>     commit 1d7e2ed22f8d9171fa8b629754022f22115b3f03
>     Author: William Tu <u9012...@gmail.com>
>     Date:   Wed Dec 13 16:38:55 2017 -0800
>
>     net: erspan: refactor existing erspan code
>
>     The patch refactors the existing erspan implementation in order
>     to support erspan version 2, which has additional metadata.  So,
>     instead of having one 'struct erspanhdr' holding erspan version 1,
>     the patch breaks it into 'struct erspan_base_hdr' and 'struct erspan_metadata'.
>
>     Signed-off-by: William Tu <u9012...@gmail.com>
>     Signed-off-by: David S. Miller <da...@davemloft.net>
>
> This is a partial backport of the upstream commit.  While doing
> backports it is pretty much impossible to fully reconstitute all
> upstream commits, but we're doing our best.  Other parts of this
> commit are introduced in the upcoming monster patch for ip6 gre
> support.

I agree it's better to break it into separate parts for IPv4 and IPv6.
I will review the ip6 gre part.
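
For readers following along, the split looks roughly like this.  This
is a simplified sketch based on the upstream include/net/erspan.h from
the commit above, not the verbatim header (the VER_MASK/VER_OFFSET
macros are elided, and the v2 bitfields are collapsed into a
placeholder):

#include <linux/types.h>        /* __be16, __be32, __u8 */

#define ERSPAN_V1_MDSIZE        4   /* v1 (type II): 32-bit index */
#define ERSPAN_V2_MDSIZE        8   /* v2 (type III): struct erspan_md2 */

/* Fixed part common to both versions. */
struct erspan_base_hdr {
        __be16 ver_vlan;        /* 4-bit version + 12-bit VLAN */
        __be16 session_id;      /* 10-bit session ID, reused as GRE key */
};

/* v2 metadata, abridged: upstream packs hwid/dir/gra/etc. into bitfields. */
struct erspan_md2 {
        __be32 timestamp;
        __be16 sgt;             /* security group tag */
        __u8 flags[2];          /* placeholder for the upstream bitfields */
};

/* Version-dependent part that used to live inside 'struct erspanhdr'. */
struct erspan_metadata {
        union {
                __be32 index;           /* version 1 (type II) */
                struct erspan_md2 md2;  /* version 2 (type III) */
        } u;
};

/* On-the-wire ERSPAN header length now depends on the version. */
static inline int erspan_hdr_len(int version)
{
        return sizeof(struct erspan_base_hdr) +
               (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE);
}

With that split, erspan_hdr_len() is what sizes the second
pskb_may_pull() in the receive path below.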

>
> Cc: William Tu <u9012...@gmail.com>
> Signed-off-by: Greg Rose <gvrose8...@gmail.com>
> ---

Acked-by: William Tu <u9012...@gmail.com>

>  datapath/linux/compat/ip_gre.c | 19 +++++++++++++------
>  1 file changed, 13 insertions(+), 6 deletions(-)
>
> diff --git a/datapath/linux/compat/ip_gre.c b/datapath/linux/compat/ip_gre.c
> index 7160e37..dbe86c4 100644
> --- a/datapath/linux/compat/ip_gre.c
> +++ b/datapath/linux/compat/ip_gre.c
> @@ -190,23 +190,26 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
>                       int gre_hdr_len)
>  {
>         struct net *net = dev_net(skb->dev);
> -       struct metadata_dst tun_dst;
> +       struct metadata_dst *tun_dst = NULL;

nit: this line is removed in the previous patch and added back here.


> +       struct erspan_base_hdr *ershdr;
> +       struct erspan_metadata *pkt_md;
>         struct ip_tunnel_net *itn;
>         struct ip_tunnel *tunnel;
> -       struct erspanhdr *ershdr;
>         const struct iphdr *iph;
> -       __be32 index;
> +       int ver;
>         int len;
>
>         itn = net_generic(net, erspan_net_id);
>         iph = ip_hdr(skb);
>         len = gre_hdr_len + sizeof(*ershdr);
>
> +       /* Check base hdr len */
>         if (unlikely(!pskb_may_pull(skb, len)))
>                 return -ENOMEM;
>
>         iph = ip_hdr(skb);
> -       ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len);
> +       ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
> +       ver = (ntohs(ershdr->ver_vlan) & VER_MASK) >> VER_OFFSET;
>
>         /* The original GRE header does not have key field,
>          * Use ERSPAN 10-bit session ID as key.
> @@ -218,8 +221,12 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
>                                   iph->saddr, iph->daddr, 0);
>
>         if (tunnel) {
> +               len = gre_hdr_len + erspan_hdr_len(ver);
> +               if (unlikely(!pskb_may_pull(skb, len)))
> +                       return -ENOMEM;
> +
>                 if (__iptunnel_pull_header(skb,
> -                                          gre_hdr_len + sizeof(*ershdr),
> +                                          len,
>                                            htons(ETH_P_TEB),
>                                            false, false) < 0)
>                         goto drop;
> @@ -1000,7 +1007,7 @@ static int erspan_tunnel_init(struct net_device *dev)
>         tunnel->tun_hlen = 8;
>         tunnel->parms.iph.protocol = IPPROTO_GRE;
>         tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
> -                      sizeof(struct erspanhdr);
> +                      sizeof(struct erspan_base_hdr) + ERSPAN_V1_MDSIZE;
>         t_hlen = tunnel->hlen + sizeof(struct iphdr);
>
>         dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
> --
> 1.8.3.1
>
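One detail worth highlighting for other reviewers: because the full
header length is no longer known up front, the receive path now pulls
the header in two stages.  Condensed from the hunks above (not the
literal code; the tunnel lookup and drop paths are trimmed):

        /* Stage 1: pull just the fixed base header and read the
         * version bits from it.
         */
        len = gre_hdr_len + sizeof(struct erspan_base_hdr);
        if (unlikely(!pskb_may_pull(skb, len)))
                return -ENOMEM;

        ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
        ver = (ntohs(ershdr->ver_vlan) & VER_MASK) >> VER_OFFSET;

        /* Stage 2: pull the full version-dependent length before the
         * metadata is parsed, so v2's larger md2 block is linear too.
         */
        len = gre_hdr_len + erspan_hdr_len(ver);
        if (unlikely(!pskb_may_pull(skb, len)))
                return -ENOMEM;

The same arithmetic shows up in erspan_tunnel_init(): sizeof(struct
erspan_base_hdr) + ERSPAN_V1_MDSIZE is exactly the old sizeof(struct
erspanhdr), so the reserved v1 header room is unchanged.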