Add a graph node to the IP unicast feature arc, ahead of ip4-lookup /
ip6-lookup. See the attached code, from a known-to-work example. Program the
src routes into a non-default FIB; the node then looks up each packet's source
address in that table.
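
To give a rough idea of programming a source prefix into that non-default
table from a plugin, here is a minimal sketch. It is not part of the attached
example: the table id, the 10.1.1.0/24 prefix and the next-hop arguments are
made up, and the fib_table_* prototypes should be double-checked against your
VPP tree.

#include <vnet/fib/fib_table.h>

/*
 * Minimal sketch (not from the attached example): find-or-create a
 * non-default IPv4 table and add a 10.1.1.0/24 "source route" to it.
 */
static void
sdp_example_add_src_route (u32 table_id,
                           const ip46_address_t * next_hop,
                           u32 next_hop_sw_if_index)
{
  fib_prefix_t pfx = {
    .fp_len = 24,
    .fp_proto = FIB_PROTOCOL_IP4,
    .fp_addr.ip4.as_u32 = clib_host_to_net_u32 (0x0a010100), /* 10.1.1.0 */
  };
  u32 fib_index;

  /* Creates and locks the table if it doesn't exist yet */
  fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4,
                                                 table_id, FIB_SOURCE_API);

  /* Add the prefix with a simple adjacency-style path */
  fib_table_entry_path_add (fib_index, &pfx, FIB_SOURCE_API,
                            FIB_ENTRY_FLAG_NONE, DPO_PROTO_IP4,
                            next_hop, next_hop_sw_if_index,
                            ~0 /* next-hop fib index */ ,
                            1 /* weight */ , NULL /* mpls labels */ ,
                            FIB_ROUTE_PATH_FLAG_NONE);
}

In the attached example, the entries in that table also carry the plugin's own
DPO (sdp_dpo_type); entries that resolve to anything else fall back to the
default src policy in the node code below.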

HTH... Dave

From: vpp-dev@lists.fd.io <vpp-dev@lists.fd.io> On Behalf Of 
georgi.mel...@gmail.com
Sent: Monday, August 27, 2018 1:28 AM
To: vpp-dev@lists.fd.io
Subject: [vpp-dev] Source Based Routing #vpp

Hi VPP experts,

I would like to describe a particular scenario/use case that I'm trying to
implement in VPP.

The scenario requires egress packets to be routed based on the source IP of
the packet rather than the destination IP. I found a similar discussion in the
VPP mail archive
(https://www.mail-archive.com/vpp-dev@lists.fd.io/msg06886.html), but the
solution discussed there would not work for routing packets from multiple
source IPs that need different routes.

I understand that we can configure multiple routing tables in VPP, each with
its own routes, but would I be able to direct the FIB lookup to a particular
table based on the source IP?

To take an analogy from the Linux kernel: does VPP support functionality
similar to the 'ip rule' command, where we can specify the routing table to be
used for a particular source IP?

Looking forward to your advice and support in finding a solution to this.

Thanks & Regards,
Georgi
/* *INDENT-OFF* */
VNET_FEATURE_INIT (ip4_sdp_slookup, static) =
{
  .arc_name = "ip4-unicast",
  .node_name = "ip4-sdp-slookup",
  .runs_before = VNET_FEATURES ("ip4-lookup"),
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VNET_FEATURE_INIT (ip6_sdp_slookup, static) =
{
  .arc_name = "ip6-unicast",
  .node_name = "ip6-sdp-slookup",
  .runs_before = VNET_FEATURES ("ip6-lookup"),
};
/* *INDENT-ON* */


int
sdp_src_lookup_enable_disable (sdp_main_t * sm,
                               u32 sw_if_index,
                               u8 enable_ip4,
                               u8 enable_ip6, u32 ip4_vrf_id, u32 ip6_vrf_id)
{
  vnet_sw_interface_t *sw;
  int rv = 0;
  u32 ip4_fib_index = 0, ip6_fib_index = 0;

  /* Not a physical port? */
  sw = vnet_get_sw_interface (sm->vnet_main, sw_if_index);
  if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
    return VNET_API_ERROR_INVALID_SW_IF_INDEX;

  if (enable_ip4)
    {
      ip4_fib_index = fib_table_find (FIB_PROTOCOL_IP4, ip4_vrf_id);

      if (ip4_fib_index == ~0)
        return VNET_API_ERROR_NO_SUCH_FIB;

      vec_validate (sm->ip4_fib_index_by_sw_if_index, sw_if_index);
      sm->ip4_fib_index_by_sw_if_index[sw_if_index] = ip4_fib_index;
    }
  vnet_feature_enable_disable ("ip4-unicast", "ip4-sdp-slookup",
                               sw_if_index, enable_ip4, 0, 0);
  if (enable_ip6)
    {
      ip6_fib_index = fib_table_find (FIB_PROTOCOL_IP6, ip6_vrf_id);

      if (ip6_fib_index == ~0)
        return VNET_API_ERROR_NO_SUCH_INNER_FIB;

      vec_validate (sm->ip6_fib_index_by_sw_if_index, sw_if_index);
      sm->ip6_fib_index_by_sw_if_index[sw_if_index] = ip6_fib_index;
    }
  vnet_feature_enable_disable ("ip6-unicast", "ip6-sdp-slookup",
                               sw_if_index, enable_ip6, 0, 0);

  return rv;
}
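
Not part of the attached code, but to illustrate the arguments, a hypothetical
call site might look like this (the sw_if_index and VRF id are made up; the
real example presumably drives this from a CLI or API handler):

  /* Enable the IPv4 source lookup on sw_if_index 1 against VRF 10,
     leave the IPv6 side disabled */
  int rv = sdp_src_lookup_enable_disable (&sdp_main,
                                          1 /* sw_if_index */ ,
                                          1 /* enable_ip4 */ ,
                                          0 /* enable_ip6 */ ,
                                          10 /* ip4_vrf_id */ ,
                                          0 /* ip6_vrf_id */ );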

/*
 * node.c - sdp plugin dst lookup, policy enforcement
 *
 * Copyright (c) <current-year> <your-organization>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>
#include <sdp/sdp.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>

typedef struct
{
  u32 next_index;
  u32 policy;
} sdp_slookup_trace_t;

/* packet trace format function */
#ifndef CLIB_MARCH_VARIANT
static u8 *
format_sdp_slookup_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  sdp_slookup_trace_t *t = va_arg (*args, sdp_slookup_trace_t *);

  s = format (s, "SDP src lookup: next index %d policy 0x%08x (%d)",
              t->next_index, t->policy, t->policy);
  return s;
}
#endif

vlib_node_registration_t sdp_slookup_node;

#define foreach_sdp_slookup_error \
_(SWAPPED, "Mac swap packets processed")

typedef enum
{
#define _(sym,str) SDP_SLOOKUP_ERROR_##sym,
  foreach_sdp_slookup_error
#undef _
    SDP_SLOOKUP_N_ERROR,
} sdp_slookup_error_t;

#ifndef CLIB_MARCH_VARIANT
static char *sdp_slookup_error_strings[] = {
#define _(sym,string) string,
  foreach_sdp_slookup_error
#undef _
};
#endif

/**
 * Add the drop arc for packets that don't pass muster.
 * All other arcs are automatically added when the SDP DPO is stacked onto
 * the various parent DPO types
 */
typedef enum
{
  SDP_SLOOKUP_NEXT_DROP,
  SDP_SLOOKUP_N_NEXT,
} sdp_slookup_next_t;

always_inline uword
ip46_sdp_slookup_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node, vlib_frame_t * frame,
                         int is_ip4, int is_traced)
{
  u32 n_left_from, *from;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  sdp_main_t *sm = &sdp_main;
  ip4_main_t *im4 = &ip4_main;
  ip_lookup_main_t *lm4 = &im4->lookup_main;
  u8 arc4 = lm4->ucast_feature_arc_index;
  vnet_feature_config_main_t *fcm4;

  /* Only the IPv4 lookup path is implemented in this inline */
  ASSERT (is_ip4);

  fcm4 = vnet_feature_get_config_main (arc4);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  vlib_get_buffers (vm, from, bufs, n_left_from);
  b = bufs;
  next = nexts;

  while (n_left_from >= 4)
    {
      sdp_dpo_t *sdp0, *sdp1, *sdp2, *sdp3;
      ip4_header_t *ip0, *ip1, *ip2, *ip3;
      u32 next0, next1, next2, next3;
      const load_balance_t *lb0, *lb1, *lb2, *lb3;
      ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
      ip4_fib_mtrie_leaf_t leaf0, leaf1, leaf2, leaf3;
      ip4_address_t *src_addr0, *src_addr1, *src_addr2, *src_addr3;
      u32 lbi0, lbi1, lbi2, lbi3;
      u32 fib_index0, fib_index1, fib_index2, fib_index3;
      flow_hash_config_t flow_hash_config0, flow_hash_config1,
        flow_hash_config2, flow_hash_config3;
      const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
      u32 hash_c0, hash_c1, hash_c2, hash_c3;
      
      if (PREDICT_TRUE (n_left_from >= 8))
        {
          vlib_prefetch_buffer_header (b[4], STORE);
          vlib_prefetch_buffer_header (b[5], STORE);
          vlib_prefetch_buffer_header (b[6], STORE);
          vlib_prefetch_buffer_header (b[7], STORE);
          
          CLIB_PREFETCH (b[4]->data, sizeof (ip0[0]), LOAD);
          CLIB_PREFETCH (b[5]->data, sizeof (ip0[0]), LOAD);
          CLIB_PREFETCH (b[6]->data, sizeof (ip0[0]), LOAD);
          CLIB_PREFETCH (b[7]->data, sizeof (ip0[0]), LOAD);
        }
      
      /* Send pkt to next feature */
      vnet_get_config_data (&fcm4->config_main,
                            &b[0]->current_config_index, &next0,
                            /* # bytes of config data */ 0);
      vnet_get_config_data (&fcm4->config_main,
                            &b[1]->current_config_index, &next1,
                            /* # bytes of config data */ 0);
      vnet_get_config_data (&fcm4->config_main,
                            &b[2]->current_config_index, &next2,
                            /* # bytes of config data */ 0);
      vnet_get_config_data (&fcm4->config_main,
                            &b[3]->current_config_index, &next3,
                            /* # bytes of config data */ 0);

      /* This sucks slightly */
      next[0] = next0;
      next[1] = next1;
      next[2] = next2;
      next[3] = next3;

      ip0 = vlib_buffer_get_current (b[0]);
      ip1 = vlib_buffer_get_current (b[1]);
      ip2 = vlib_buffer_get_current (b[2]);
      ip3 = vlib_buffer_get_current (b[3]);

      /* Do a src address lookup */
      src_addr0 = &ip0->src_address;
      src_addr1 = &ip1->src_address;
      src_addr2 = &ip2->src_address;
      src_addr3 = &ip3->src_address;
        
      /*
       * Src lookup in the configured fib, almost certainly NOT
       * the normal forwarding fib for the interface
       */
      fib_index0 = sm->ip4_fib_index_by_sw_if_index
        [vnet_buffer (b[0])->sw_if_index[VLIB_RX]];
      fib_index1 = sm->ip4_fib_index_by_sw_if_index
        [vnet_buffer (b[1])->sw_if_index[VLIB_RX]];
      fib_index2 = sm->ip4_fib_index_by_sw_if_index
        [vnet_buffer (b[2])->sw_if_index[VLIB_RX]];
      fib_index3 = sm->ip4_fib_index_by_sw_if_index
        [vnet_buffer (b[3])->sw_if_index[VLIB_RX]];
        
      mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
      leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, src_addr0);
      leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 2);
      leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 3);
      lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);

      mtrie1 = &ip4_fib_get (fib_index1)->mtrie;
      leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, src_addr1);
      leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, src_addr1, 2);
      leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, src_addr1, 3);
      lbi1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);

      mtrie2 = &ip4_fib_get (fib_index2)->mtrie;
      leaf2 = ip4_fib_mtrie_lookup_step_one (mtrie2, src_addr2);
      leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, src_addr2, 2);
      leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, src_addr2, 3);
      lbi2 = ip4_fib_mtrie_leaf_get_adj_index (leaf2);

      mtrie3 = &ip4_fib_get (fib_index3)->mtrie;
      leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, src_addr3);
      leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, src_addr3, 2);
      leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, src_addr3, 3);
      lbi3 = ip4_fib_mtrie_leaf_get_adj_index (leaf3);

      lb0 = load_balance_get (lbi0);
      lb1 = load_balance_get (lbi1);
      lb2 = load_balance_get (lbi2);
      lb3 = load_balance_get (lbi3);

      /* Use flow hash to compute multipath adjacency. */
      hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
        {
          flow_hash_config0 = lb0->lb_hash_config;

          hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
            ip4_compute_flow_hash (ip0, flow_hash_config0);
          dpo0 =
            load_balance_get_fwd_bucket (lb0,
                                         (hash_c0 &
                                          (lb0->lb_n_buckets_minus_1)));
        }
      else
        dpo0 = load_balance_get_bucket_i (lb0, 0);

      hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
      if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
        {
          flow_hash_config1 = lb1->lb_hash_config;

          hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
            ip4_compute_flow_hash (ip1, flow_hash_config1);
          dpo1 =
            load_balance_get_fwd_bucket (lb1,
                                         (hash_c1 &
                                          (lb1->lb_n_buckets_minus_1)));
        }
      else
        dpo1 = load_balance_get_bucket_i (lb1, 0);

      hash_c2 = vnet_buffer (b[2])->ip.flow_hash = 0;
      if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
        {
          flow_hash_config2 = lb2->lb_hash_config;

          hash_c2 = vnet_buffer (b[2])->ip.flow_hash =
            ip4_compute_flow_hash (ip2, flow_hash_config2);
          dpo2 =
            load_balance_get_fwd_bucket (lb2,
                                         (hash_c2 &
                                          (lb2->lb_n_buckets_minus_1)));
        }
      else
        dpo2 = load_balance_get_bucket_i (lb2, 0);

      hash_c3 = vnet_buffer (b[3])->ip.flow_hash = 0;
      if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
        {
          flow_hash_config3 = lb3->lb_hash_config;

          hash_c3 = vnet_buffer (b[3])->ip.flow_hash =
            ip4_compute_flow_hash (ip3, flow_hash_config3);
          dpo3 =
            load_balance_get_fwd_bucket (lb3,
                                         (hash_c3 &
                                          (lb3->lb_n_buckets_minus_1)));
        }
      else
        dpo3 = load_balance_get_bucket_i (lb3, 0);

      /*
       * If the lookup returns something other than our
       * interposed dpo, supply default src policy bits
       * $$$ configurable default policy.
       */
      if (PREDICT_FALSE (dpo0->dpoi_type != sdp_dpo_type))
        vnet_buffer (b[0])->ip.sdp.src_policy = 0xFFFFFFFF;
      else
        {
          /* Save the src policy in buffer metadata */
          sdp0 = sdp_dpo_get (dpo0->dpoi_index);
          vnet_buffer (b[0])->ip.sdp.src_policy = sdp0->sdp_policy;
        }
      if (PREDICT_FALSE (dpo1->dpoi_type != sdp_dpo_type))
        vnet_buffer (b[1])->ip.sdp.src_policy = 0xFFFFFFFF;
      else
        {
          /* Save the src policy in buffer metadata */
          sdp1 = sdp_dpo_get (dpo1->dpoi_index);
          vnet_buffer (b[1])->ip.sdp.src_policy = sdp1->sdp_policy;
        }
      if (PREDICT_FALSE (dpo2->dpoi_type != sdp_dpo_type))
        vnet_buffer (b[2])->ip.sdp.src_policy = 0xFFFFFFFF;
      else
        {
          /* Save the src policy in buffer metadata */
          sdp2 = sdp_dpo_get (dpo2->dpoi_index);
          vnet_buffer (b[2])->ip.sdp.src_policy = sdp2->sdp_policy;
        }

      if (PREDICT_FALSE (dpo3->dpoi_type != sdp_dpo_type))
        vnet_buffer (b[3])->ip.sdp.src_policy = 0xFFFFFFFF;
      else
        {
          /* Save the src policy in buffer metadata */
          sdp3 = sdp_dpo_get (dpo3->dpoi_index);
          vnet_buffer (b[3])->ip.sdp.src_policy = sdp3->sdp_policy;
        }

      if (is_traced)
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              sdp_slookup_trace_t *t =
                vlib_add_trace (vm, node, b[0], sizeof (*t));
              t->next_index = next0;
              t->policy = vnet_buffer (b[0])->ip.sdp.src_policy;
            }
          if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
            {
              sdp_slookup_trace_t *t =
                vlib_add_trace (vm, node, b[1], sizeof (*t));
              t->next_index = next1;
              t->policy = vnet_buffer (b[1])->ip.sdp.src_policy;
            }
          if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
            {
              sdp_slookup_trace_t *t =
                vlib_add_trace (vm, node, b[2], sizeof (*t));
              t->next_index = next2;
              t->policy = vnet_buffer (b[2])->ip.sdp.src_policy;
            }
          if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
            {
              sdp_slookup_trace_t *t =
                vlib_add_trace (vm, node, b[3], sizeof (*t));
              t->next_index = next3;
              t->policy = vnet_buffer (b[3])->ip.sdp.src_policy;
            }
        }

      b += 4;
      next += 4;
      n_left_from -= 4;
    }
  while (n_left_from > 0)
    {
      sdp_dpo_t *sdp0;
      ip4_header_t *ip0;
      u32 next0;
      const load_balance_t *lb0;
      ip4_fib_mtrie_t *mtrie0;
      ip4_fib_mtrie_leaf_t leaf0;
      ip4_address_t *src_addr0;
      u32 lbi0;
      u32 fib_index0;
      flow_hash_config_t flow_hash_config0;
      const dpo_id_t *dpo0;
      u32 hash_c0;

      /* Send pkt to next feature */
      vnet_get_config_data (&fcm4->config_main,
                            &b[0]->current_config_index, &next0,
                            /* # bytes of config data */ 0);

      next[0] = next0;

      ip0 = vlib_buffer_get_current (b[0]);
      /* Do a src address lookup */
      src_addr0 = &ip0->src_address;

      /*
       * Src lookup in the configured fib, almost certainly NOT
       * the normal forwarding fib for the interface
       */
      fib_index0 = sm->ip4_fib_index_by_sw_if_index
        [vnet_buffer (b[0])->sw_if_index[VLIB_RX]];

      mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
      leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, src_addr0);
      leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 2);
      leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 3);
      lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);

      ASSERT (lbi0);
      lb0 = load_balance_get (lbi0);

      ASSERT (lb0->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb0->lb_n_buckets));

      /* Use flow hash to compute multipath adjacency. */
      hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
        {
          flow_hash_config0 = lb0->lb_hash_config;

          hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
            ip4_compute_flow_hash (ip0, flow_hash_config0);
          dpo0 =
            load_balance_get_fwd_bucket (lb0,
                                         (hash_c0 &
                                          (lb0->lb_n_buckets_minus_1)));
        }
      else
        {
          dpo0 = load_balance_get_bucket_i (lb0, 0);
        }

      /*
       * If the lookup returns something other than our
       * interposed dpo, supply default src policy bits
       * $$$ configurable default policy.
       */
      if (PREDICT_FALSE (dpo0->dpoi_type != sdp_dpo_type))
        vnet_buffer (b[0])->ip.sdp.src_policy = 0xFFFFFFFF;
      else
        {
          /* Save the src policy in buffer metadata */
          sdp0 = sdp_dpo_get (dpo0->dpoi_index);
          vnet_buffer (b[0])->ip.sdp.src_policy = sdp0->sdp_policy;
        }

      if (is_traced)
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              sdp_slookup_trace_t *t =
                vlib_add_trace (vm, node, b[0], sizeof (*t));
              t->next_index = next0;
              t->policy = vnet_buffer (b[0])->ip.sdp.src_policy;
            }
        }

      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_sdp_slookup_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    return ip46_sdp_slookup_inline (vm, node, frame, 1 /* is_ip4 */ ,
                                    1 /* is_traced */ );
  else
    return ip46_sdp_slookup_inline (vm, node, frame, 1 /* is_ip4 */ ,
                                    0 /* is_traced */ );
}


/* *INDENT-OFF* */
#ifndef CLIB_MARCH_VARIANT
VLIB_REGISTER_NODE (ip4_sdp_slookup_node) =
{
  .name = "ip4-sdp-slookup",
  .vector_size = sizeof (u32),
  .format_trace = format_sdp_slookup_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(sdp_slookup_error_strings),
  .error_strings = sdp_slookup_error_strings,

  .n_next_nodes = SDP_SLOOKUP_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes =
  {
    [SDP_SLOOKUP_NEXT_DROP] = "ip4-drop",
  },
};
#endif
/* *INDENT-ON* */

/*
 * NB: ip46_sdp_slookup_inline above only implements the IPv4 lookup path;
 * an equivalent ip6 source lookup is needed before this node is usable.
 */
VLIB_NODE_FN (ip6_sdp_slookup_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    return ip46_sdp_slookup_inline (vm, node, frame, 0 /* is_ip4 */ ,
                                    1 /* is_traced */ );
  else
    return ip46_sdp_slookup_inline (vm, node, frame, 0 /* is_ip4 */ ,
                                    0 /* is_traced */ );
}

/* *INDENT-OFF* */
#ifndef CLIB_MARCH_VARIANT
VLIB_REGISTER_NODE (ip6_sdp_slookup_node) =
{
  .name = "ip6-sdp-slookup",
  .vector_size = sizeof (u32),
  .format_trace = format_sdp_slookup_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(sdp_slookup_error_strings),
  .error_strings = sdp_slookup_error_strings,

  .n_next_nodes = SDP_SLOOKUP_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes =
  {
    [SDP_SLOOKUP_NEXT_DROP] = "ip6-drop",
  },
};
#endif
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */