On Thu, May 26, 2022 at 9:54 PM Numan Siddique <[email protected]> wrote:
>
> On Thu, May 26, 2022 at 12:19 PM Ihar Hrachyshka <[email protected]> wrote:
> >
> > When multiple chassis are set in requested-chassis, port binding is
> > configured in multiple cluster locations. In a live migration
> > scenario, only one of the locations runs the workload at a particular
> > point in time. Yet, it's expected that the workload may switch to
> > running at an additional chassis at any moment during live migration
> > (depending on libvirt / qemu migration progress). To make the switch
> > near instant, do the following:
> >
> > When a port sends a packet to another port that has multiple chassis
> > set, then, in addition to sending the packet to the main chassis,
> > also send it to the additional chassis. When the sending port is bound
> > on either the main or an additional chassis, handle the packet locally
> > and also send it to all other chassis.
> >
> > This is achieved with additional flows in tables 37 and 38.
> >
> > Signed-off-by: Ihar Hrachyshka <[email protected]>
>
> Hi Ihar,
>
> Looks like this patch has a memory leak.
>
> Please check it out here -
> https://github.com/ovsrobot/ovn/runs/6613074054?check_suite_focus=true
>
> I can see it locally with libasan configured.
>
> ----
> Indirect leak of 816 byte(s) in 1 object(s) allocated from:
>     #0 0x7fd26d981ad7 in calloc (/lib64/libasan.so.6+0xaead7)
>     #1 0x60194e in xcalloc__ lib/util.c:121
>     #2 0x60194e in xzalloc__ lib/util.c:131
>     #3 0x60194e in xzalloc lib/util.c:165
>     #4 0x41fed3 in ha_chassis_get_ordered ../controller/ha-chassis.c:227
>     #5 0x4b0f2e in consider_port_binding ../controller/physical.c:1162
>     #6 0x4b4949 in physical_run ../controller/physical.c:1787
>     #7 0x49f263 in en_pflow_output_run ../controller/ovn-controller.c:3053
>     #8 0x508cdd in engine_recompute ../lib/inc-proc-eng.c:377
>     #9 0x509454 in engine_run_node ../lib/inc-proc-eng.c:439
>     #10 0x5096b4 in engine_run ../lib/inc-proc-eng.c:490
>     #11 0x4a66da in main ../controller/ovn-controller.c:3890
>     #12 0x7fd26d145f5f in __libc_start_call_main (/lib64/libc.so.6+0x40f5f)
>
> SUMMARY: AddressSanitizer: 94016 byte(s) leaked in 226 allocation(s).
> ../../tests/ovs-macros.at:234: hard failure
>
> ----
>
> Thanks
> Numan
>
> > ---
> >  controller/binding.c  |   2 +-
> >  controller/binding.h  |   3 +
> >  controller/physical.c | 377 ++++++++++++++---------
> >  ovn-nb.xml            |   9 +
> >  ovn-sb.xml            |   9 +
> >  tests/ovn.at          | 693 ++++++++++++++++++++++++++++++++++++++++++
> >  6 files changed, 949 insertions(+), 144 deletions(-)
> >
> > diff --git a/controller/binding.c b/controller/binding.c
> > index fc553f4f0..fd35e0a94 100644
> > --- a/controller/binding.c
> > +++ b/controller/binding.c
> > @@ -994,7 +994,7 @@ update_port_additional_encap_if_needed(
> >      return true;
> >  }
> >
> > -static bool
> > +bool
> >  is_additional_chassis(const struct sbrec_port_binding *pb,
> >                        const struct sbrec_chassis *chassis_rec)
> >  {
> > diff --git a/controller/binding.h b/controller/binding.h
> > index d5e9502f6..1fed06674 100644
> > --- a/controller/binding.h
> > +++ b/controller/binding.h
> > @@ -175,6 +175,9 @@ void binding_tracked_dp_destroy(struct hmap *tracked_datapaths);
> >
> >  void binding_dump_local_bindings(struct local_binding_data *, struct ds *);
> >
> > +bool is_additional_chassis(const struct sbrec_port_binding *pb,
> > +                           const struct sbrec_chassis *chassis_rec);
> > +
> >  /* Corresponds to each Port_Binding.type. */
> >  enum en_lport_type {
> >      LP_UNKNOWN,
> > diff --git a/controller/physical.c b/controller/physical.c
> > index 1fae4ef2c..43aeb1007 100644
> > --- a/controller/physical.c
> > +++ b/controller/physical.c
> > @@ -60,6 +60,11 @@ struct zone_ids {
> >      int snat;                   /* MFF_LOG_SNAT_ZONE. */
> >  };
> >
> > +struct tunnel {
> > +    struct ovs_list list_node;
> > +    const struct chassis_tunnel *tun;
> > +};
> > +
> >  static void
> >  load_logical_ingress_metadata(const struct sbrec_port_binding *binding,
> >                                const struct zone_ids *zone_ids,
> > @@ -286,25 +291,83 @@ match_outport_dp_and_port_keys(struct match *match,
> >      match_set_reg(match, MFF_LOG_OUTPORT - MFF_REG0, port_key);
> >  }
> >
> > +static struct sbrec_encap *
> > +find_additional_encap_for_chassis(const struct sbrec_port_binding *pb,
> > +                                  const struct sbrec_chassis *chassis_rec)
> > +{
> > +    for (int i = 0; i < pb->n_additional_encap; i++) {
> > +        if (!strcmp(pb->additional_encap[i]->chassis_name,
> > +                    chassis_rec->name)) {
> > +            return pb->additional_encap[i];
> > +        }
> > +    }
> > +    return NULL;
> > +}
> > +
> > +static struct ovs_list *
> > +get_remote_tunnels(const struct sbrec_port_binding *binding,
> > +                   const struct sbrec_chassis *chassis,
> > +                   const struct hmap *chassis_tunnels)
> > +{
> > +    const struct chassis_tunnel *tun;
> > +
> > +    struct ovs_list *tunnels = xmalloc(sizeof *tunnels);
> > +    ovs_list_init(tunnels);
> > +
> > +    if (binding->chassis && binding->chassis != chassis) {
> > +        tun = get_port_binding_tun(binding->encap, binding->chassis,
> > +                                   chassis_tunnels);
> > +        if (!tun) {
> > +            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
> > +            VLOG_WARN_RL(
> > +                &rl, "Failed to locate tunnel to reach main chassis %s "
> > +                     "for port %s. Cloning packets disabled for the 
> > chassis.",
> > +                binding->chassis->name, binding->logical_port);
> > +        } else {
> > +            struct tunnel *tun_elem = xmalloc(sizeof *tun_elem);
> > +            tun_elem->tun = tun;
> > +            ovs_list_push_back(tunnels, &tun_elem->list_node);
> > +        }
> > +    }
> > +
> > +    for (int i = 0; i < binding->n_additional_chassis; i++) {
> > +        if (binding->additional_chassis[i] == chassis) {
> > +            continue;
> > +        }
> > +        const struct sbrec_encap *additional_encap;
> > +        additional_encap = find_additional_encap_for_chassis(binding, chassis);
> > +        tun = get_port_binding_tun(additional_encap,
> > +                                   binding->additional_chassis[i],
> > +                                   chassis_tunnels);
> > +        if (!tun) {
> > +            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
> > +            VLOG_WARN_RL(
> > +                &rl, "Failed to locate tunnel to reach additional chassis 
> > %s "
> > +                     "for port %s. Cloning packets disabled for the 
> > chassis.",
> > +                binding->additional_chassis[i]->name, 
> > binding->logical_port);
> > +            continue;
> > +        }
> > +        struct tunnel *tun_elem = xmalloc(sizeof *tun_elem);
> > +        tun_elem->tun = tun;
> > +        ovs_list_push_back(tunnels, &tun_elem->list_node);
> > +    }
> > +    return tunnels;
> > +}
> > +
> >  static void
> > -put_remote_port_redirect_overlay(const struct
> > -                                 sbrec_port_binding *binding,
> > -                                 bool is_ha_remote,
> > -                                 struct ha_chassis_ordered *ha_ch_ordered,
> > +put_remote_port_redirect_overlay(const struct sbrec_port_binding *binding,
> >                                   enum mf_field_id mff_ovn_geneve,
> > -                                 const struct chassis_tunnel *tun,
> >                                   uint32_t port_key,
> >                                   struct match *match,
> >                                   struct ofpbuf *ofpacts_p,
> > +                                 const struct sbrec_chassis *chassis,
> >                                   const struct hmap *chassis_tunnels,
> >                                   struct ovn_desired_flow_table *flow_table)
> >  {
> > -    if (!is_ha_remote) {
> > -        /* Setup encapsulation */
> > -        if (!tun) {
> > -            return;
> > -        }
> > -
> > +    /* Setup encapsulation */
> > +    struct ovs_list *tuns = get_remote_tunnels(binding, chassis,
> > +                                               chassis_tunnels);
> > +    if (!ovs_list_is_empty(tuns)) {
> >          bool is_vtep_port = !strcmp(binding->type, "vtep");
> >          /* rewrite MFF_IN_PORT to bypass OpenFlow loopback check for ARP/ND
> >           * responder in L3 networks. */
> > @@ -312,78 +375,102 @@ put_remote_port_redirect_overlay(const struct
> >              put_load(ofp_to_u16(OFPP_NONE), MFF_IN_PORT, 0, 16, ofpacts_p);
> >          }
> >
> > -        put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
> > -                          is_vtep_port, ofpacts_p);
> > -        /* Output to tunnel. */
> > -        ofpact_put_OUTPUT(ofpacts_p)->port = tun->ofport;
> > -    } else {
> > -        /* Make sure all tunnel endpoints use the same encapsulation,
> > -         * and set it up */
> > -        for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
> > -            const struct sbrec_chassis *ch = ha_ch_ordered->ha_ch[i].chassis;
> > -            if (!ch) {
> > -                continue;
> > -            }
> > -            if (!tun) {
> > -                tun = chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
> > -            } else {
> > -                struct chassis_tunnel *chassis_tunnel =
> > -                    chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
> > -                if (chassis_tunnel &&
> > -                    tun->type != chassis_tunnel->type) {
> > -                    static struct vlog_rate_limit rl =
> > -                                  VLOG_RATE_LIMIT_INIT(1, 1);
> > -                    VLOG_ERR_RL(&rl, "Port %s has Gateway_Chassis "
> > -                                "with mixed encapsulations, only "
> > -                                "uniform encapsulations are "
> > -                                "supported.", binding->logical_port);
> > -                    return;
> > -                }
> > -            }
> > +        struct tunnel *tun;
> > +        LIST_FOR_EACH (tun, list_node, tuns) {
> > +            put_encapsulation(mff_ovn_geneve, tun->tun,
> > +                              binding->datapath, port_key, is_vtep_port,
> > +                              ofpacts_p);
> > +            ofpact_put_OUTPUT(ofpacts_p)->port = tun->tun->ofport;
> > +        }
> > +        put_resubmit(OFTABLE_LOCAL_OUTPUT, ofpacts_p);
> > +        ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
> > +                        binding->header_.uuid.parts[0], match, ofpacts_p,
> > +                        &binding->header_.uuid);
> > +    }
> > +    struct tunnel *tun_elem;
> > +    LIST_FOR_EACH_POP (tun_elem, list_node, tuns) {
> > +        free(tun_elem);
> > +    }
> > +    free(tuns);
> > +}
> > +
> > +static void
> > +put_remote_port_redirect_overlay_ha_remote(
> > +    const struct sbrec_port_binding *binding,
> > +    struct ha_chassis_ordered *ha_ch_ordered,
> > +    enum mf_field_id mff_ovn_geneve, uint32_t port_key,
> > +    struct match *match, struct ofpbuf *ofpacts_p,
> > +    const struct hmap *chassis_tunnels,
> > +    struct ovn_desired_flow_table *flow_table)
> > +{
> > +    /* Make sure all tunnel endpoints use the same encapsulation,
> > +     * and set it up */
> > +    const struct chassis_tunnel *tun = NULL;
> > +    for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
> > +        const struct sbrec_chassis *ch = ha_ch_ordered->ha_ch[i].chassis;
> > +        if (!ch) {
> > +            continue;
> >          }
> >          if (!tun) {
> > -            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
> > -            VLOG_ERR_RL(&rl, "No tunnel endpoint found for HA chassis in "
> > -                        "HA chassis group of port %s",
> > -                        binding->logical_port);
> > -            return;
> > +            tun = chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
> > +        } else {
> > +            struct chassis_tunnel *chassis_tunnel =
> > +                chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
> > +            if (chassis_tunnel &&
> > +                tun->type != chassis_tunnel->type) {
> > +                static struct vlog_rate_limit rl =
> > +                              VLOG_RATE_LIMIT_INIT(1, 1);
> > +                VLOG_ERR_RL(&rl, "Port %s has Gateway_Chassis "
> > +                            "with mixed encapsulations, only "
> > +                            "uniform encapsulations are "
> > +                            "supported.", binding->logical_port);
> > +                return;
> > +            }
> >          }
> > +    }
> > +    if (!tun) {
> > +        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
> > +        VLOG_ERR_RL(&rl, "No tunnel endpoint found for HA chassis in "
> > +                    "HA chassis group of port %s",
> > +                    binding->logical_port);
> > +        return;
> > +    }
> >
> > -        put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
> > -                          !strcmp(binding->type, "vtep"),
> > -                          ofpacts_p);
> > +    put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
> > +                      !strcmp(binding->type, "vtep"),
> > +                      ofpacts_p);
> >
> > -        /* Output to tunnels with active/backup */
> > -        struct ofpact_bundle *bundle = ofpact_put_BUNDLE(ofpacts_p);
> > +    /* Output to tunnels with active/backup */
> > +    struct ofpact_bundle *bundle = ofpact_put_BUNDLE(ofpacts_p);
> >
> > -        for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
> > -            const struct sbrec_chassis *ch =
> > -                ha_ch_ordered->ha_ch[i].chassis;
> > -            if (!ch) {
> > -                continue;
> > -            }
> > -            tun = chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
> > -            if (!tun) {
> > -                continue;
> > -            }
> > -            if (bundle->n_members >= BUNDLE_MAX_MEMBERS) {
> > -                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
> > -                VLOG_WARN_RL(&rl, "Remote endpoints for port beyond "
> > -                             "BUNDLE_MAX_MEMBERS");
> > -                break;
> > -            }
> > -            ofpbuf_put(ofpacts_p, &tun->ofport, sizeof tun->ofport);
> > -            bundle = ofpacts_p->header;
> > -            bundle->n_members++;
> > +    for (size_t i = 0; i < ha_ch_ordered->n_ha_ch; i++) {
> > +        const struct sbrec_chassis *ch =
> > +            ha_ch_ordered->ha_ch[i].chassis;
> > +        if (!ch) {
> > +            continue;
> >          }
> > -
> > -        bundle->algorithm = NX_BD_ALG_ACTIVE_BACKUP;
> > -        /* Although ACTIVE_BACKUP bundle algorithm seems to ignore
> > -         * the next two fields, those are always set */
> > -        bundle->basis = 0;
> > -        bundle->fields = NX_HASH_FIELDS_ETH_SRC;
> > -        ofpact_finish_BUNDLE(ofpacts_p, &bundle);
> > +        tun = chassis_tunnel_find(chassis_tunnels, ch->name, NULL);
> > +        if (!tun) {
> > +            continue;
> > +        }
> > +        if (bundle->n_members >= BUNDLE_MAX_MEMBERS) {
> > +            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
> > +            VLOG_WARN_RL(&rl, "Remote endpoints for port beyond "
> > +                         "BUNDLE_MAX_MEMBERS");
> > +            break;
> > +        }
> > +        ofpbuf_put(ofpacts_p, &tun->ofport, sizeof tun->ofport);
> > +        bundle = ofpacts_p->header;
> > +        bundle->n_members++;
> >      }
> > +
> > +    bundle->algorithm = NX_BD_ALG_ACTIVE_BACKUP;
> > +    /* Although ACTIVE_BACKUP bundle algorithm seems to ignore
> > +     * the next two fields, those are always set */
> > +    bundle->basis = 0;
> > +    bundle->fields = NX_HASH_FIELDS_ETH_SRC;
> > +    ofpact_finish_BUNDLE(ofpacts_p, &bundle);
> > +
> >      ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
> >                      binding->header_.uuid.parts[0],
> >                      match, ofpacts_p, &binding->header_.uuid);
> > @@ -890,6 +977,13 @@ get_binding_peer(struct ovsdb_idl_index *sbrec_port_binding_by_name,
> >      return peer;
> >  }
> >
> > +enum access_type {
> > +    PORT_LOCAL = 0,
> > +    PORT_LOCALNET,
> > +    PORT_REMOTE,
> > +    PORT_HA_REMOTE,
> > +};
> > +
> >  static void
> >  consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
> >                        enum mf_field_id mff_ovn_geneve,
> > @@ -952,10 +1046,6 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
> >                          &match, ofpacts_p, &binding->header_.uuid);
> >          return;
> >      }
> > -
> > -    struct ha_chassis_ordered *ha_ch_ordered
> > -        = ha_chassis_get_ordered(binding->ha_chassis_group);
> > -
> >      if (!strcmp(binding->type, "chassisredirect")
> >          && (binding->chassis == chassis
> >              || ha_chassis_group_is_active(binding->ha_chassis_group,
> > @@ -1011,14 +1101,14 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
> >                          binding->header_.uuid.parts[0],
> >                          &match, ofpacts_p, &binding->header_.uuid);
> >
> > -        goto out;
> > +        return;
> >      }
> >
> >      /* Find the OpenFlow port for the logical port, as 'ofport'.  This is
> >       * one of:
> >       *
> >       *     - If the port is a VIF on the chassis we're managing, the
> > -     *       OpenFlow port for the VIF.  'tun' will be NULL.
> > +     *       OpenFlow port for the VIF.
> >       *
> >       *       The same logic handles ports that OVN implements as Open vSwitch
> >       *       patch ports, that is, "localnet" and "l2gateway" ports.
> > @@ -1028,20 +1118,15 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
> >       *
> >       *       For a localnet or l2gateway patch port, if a VLAN ID was
> >       *       configured, 'tag' is set to that VLAN ID; otherwise 'tag' is 0.
> > -     *
> > -     *     - If the port is on a remote chassis, the OpenFlow port for a
> > -     *       tunnel to the VIF's remote chassis.  'tun' identifies that
> > -     *       tunnel.
> >       */
> >
> >      int tag = 0;
> >      bool nested_container = false;
> >      const struct sbrec_port_binding *parent_port = NULL;
> >      ofp_port_t ofport;
> > -    bool is_remote = false;
> >      if (binding->parent_port && *binding->parent_port) {
> >          if (!binding->tag) {
> > -            goto out;
> > +            return;
> >          }
> >          ofport = local_binding_get_lport_ofport(local_bindings,
> >                                                  binding->parent_port);
> > @@ -1064,46 +1149,43 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
> >                                                  binding->logical_port);
> >          if (ofport && !lport_can_bind_on_this_chassis(chassis, binding)) {
> >              /* Even though there is an ofport for this port_binding, it is
> > -             * requested on a different chassis. So ignore this ofport.
> > +             * requested on different chassis. So ignore this ofport.
> >               */
> >              ofport = 0;
> >          }
> >      }
> >
> > -    bool is_ha_remote = false;
> > -    const struct chassis_tunnel *tun = NULL;
> >      const struct sbrec_port_binding *localnet_port =
> >          get_localnet_port(local_datapaths, dp_key);
> > +
> > +    struct ha_chassis_ordered *ha_ch_ordered;
> > +    ha_ch_ordered = ha_chassis_get_ordered(binding->ha_chassis_group);
> > +
The leak is because 'ha_ch_ordered' is not destroyed.  Below there are
many returns without destroying 'ha_ch_ordered'.
I'd suggest using 'goto out' instead of the returns and doing the
cleanup there, e.g. as in the sketch below.
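
Just a sketch (it essentially restores the 'out:' label that this patch
removes; '!binding->tag' stands in for any one of the early exits):

    struct ha_chassis_ordered *ha_ch_ordered
        = ha_chassis_get_ordered(binding->ha_chassis_group);

    if (!binding->tag) {
        goto out;    /* instead of a bare 'return', which leaks */
    }

    /* ... rest of consider_port_binding(), with every other early
     * return converted to 'goto out' the same way ... */

out:
    if (ha_ch_ordered) {
        ha_chassis_destroy_ordered(ha_ch_ordered);
    }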

Otherwise the patch LGTM.  Also, please use 'size_t' instead of 'int'
for the new loop counters, just to be consistent.

Thanks
Numan


> > +    /* Determine how the port is accessed. */
> > +    enum access_type access_type = PORT_LOCAL;
> >      if (!ofport) {
> > -        /* It is remote port, may be reached by tunnel or localnet port */
> > -        is_remote = true;
> > -        if (localnet_port) {
> > +        /* Enforce tunneling while we clone packets to additional chassis b/c
> > +         * otherwise upstream switch won't flood the packet to both chassis. */
> > +        if (localnet_port && !binding->additional_chassis) {
> >              ofport = u16_to_ofp(simap_get(patch_ofports,
> >                                            localnet_port->logical_port));
> >              if (!ofport) {
> > -                goto out;
> > +                return;
> >              }
> > +            access_type = PORT_LOCALNET;
> >          } else {
> >              if (!ha_ch_ordered || ha_ch_ordered->n_ha_ch < 2) {
> > -                /* It's on a single remote chassis */
> > -                if (!binding->chassis) {
> > -                    goto out;
> > -                }
> > -                tun = get_port_binding_tun(binding->encap, binding->chassis,
> > -                                           chassis_tunnels);
> > -                if (!tun) {
> > -                    goto out;
> > -                }
> > -                ofport = tun->ofport;
> > +                ha_chassis_destroy_ordered(ha_ch_ordered);
> > +                access_type = PORT_REMOTE;
> >              } else {
> >                  /* It's distributed across the chassis belonging to
> >                   * an HA chassis group. */
> > -                is_ha_remote = true;
> > +                access_type = PORT_HA_REMOTE;
> >              }
> >          }
> >      }
> >
> > -    if (!is_remote) {
> > +    if (access_type == PORT_LOCAL) {
> >          /* Packets that arrive from a vif can belong to a VM or
> >           * to a container located inside that VM. Packets that
> >           * arrive from containers have a tag (vlan) associated with them.
> > @@ -1292,8 +1374,7 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
> >                  }
> >              }
> >          }
> > -
> > -    } else if (!tun && !is_ha_remote) {
> > +    } else if (access_type == PORT_LOCALNET) {
> >          /* Remote port connected by localnet port */
> >          /* Table 38, priority 100.
> >           * =======================
> > @@ -1315,39 +1396,37 @@ consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
> >          ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
> >                          binding->header_.uuid.parts[0],
> >                          &match, ofpacts_p, &binding->header_.uuid);
> > -    } else {
> >
> > -        const char *redirect_type = smap_get(&binding->options,
> > -                                             "redirect-type");
> > -
> > -        /* Remote port connected by tunnel */
> > +        /* No more tunneling to set up. */
> > +        return;
> > +    }
> >
> > -        /* Table 38, priority 100.
> > -         * =======================
> > -         *
> > -         * Handles traffic that needs to be sent to a remote hypervisor.  Each
> > -         * flow matches an output port that includes a logical port on a remote
> > -         * hypervisor, and tunnels the packet to that hypervisor.
> > -         */
> > -        ofpbuf_clear(ofpacts_p);
> > +    /* Send packets to additional chassis if needed. */
> > +    const char *redirect_type = smap_get(&binding->options,
> > +                                         "redirect-type");
> >
> > -        /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
> > -        match_outport_dp_and_port_keys(&match, dp_key, port_key);
> > +    /* Table 38, priority 100.
> > +     * =======================
> > +     *
> > +     * Handles traffic that needs to be sent to a remote hypervisor.  Each
> > +     * flow matches an output port that includes a logical port on a remote
> > +     * hypervisor, and tunnels the packet to that hypervisor.
> > +     */
> > +    ofpbuf_clear(ofpacts_p);
> > +    match_outport_dp_and_port_keys(&match, dp_key, port_key);
> >
> > -        if (redirect_type && !strcasecmp(redirect_type, "bridged")) {
> > -            put_remote_port_redirect_bridged(binding, local_datapaths,
> > -                                             ld, &match, ofpacts_p,
> > -                                             flow_table);
> > -        } else {
> > -            put_remote_port_redirect_overlay(binding, is_ha_remote,
> > -                                             ha_ch_ordered, mff_ovn_geneve,
> > -                                             tun, port_key, &match, ofpacts_p,
> > -                                             chassis_tunnels, flow_table);
> > -        }
> > -    }
> > -out:
> > -    if (ha_ch_ordered) {
> > +    if (redirect_type && !strcasecmp(redirect_type, "bridged")) {
> > +        put_remote_port_redirect_bridged(
> > +            binding, local_datapaths, ld, &match, ofpacts_p, flow_table);
> > +    } else if (access_type == PORT_HA_REMOTE) {
> > +        put_remote_port_redirect_overlay_ha_remote(
> > +            binding, ha_ch_ordered, mff_ovn_geneve, port_key,
> > +            &match, ofpacts_p, chassis_tunnels, flow_table);
> >          ha_chassis_destroy_ordered(ha_ch_ordered);
> > +    } else {
> > +        put_remote_port_redirect_overlay(
> > +            binding, mff_ovn_geneve, port_key, &match, ofpacts_p,
> > +            chassis, chassis_tunnels, flow_table);
> >      }
> >  }
> >
> > @@ -1503,7 +1582,8 @@ consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
> >              put_load(port->tunnel_key, MFF_LOG_OUTPORT, 0, 32,
> >                       &remote_ofpacts);
> >              put_resubmit(OFTABLE_CHECK_LOOPBACK, &remote_ofpacts);
> > -        } else if (port->chassis == chassis
> > +        } else if ((port->chassis == chassis
> > +                    || is_additional_chassis(port, chassis))
> >                     && (local_binding_get_primary_pb(local_bindings, lport_name)
> >                         || !strcmp(port->type, "l3gateway"))) {
> >              put_load(port->tunnel_key, MFF_LOG_OUTPORT, 0, 32, &ofpacts);
> > @@ -1526,15 +1606,26 @@ consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
> >                      put_resubmit(OFTABLE_CHECK_LOOPBACK, &ofpacts);
> >                  }
> >              }
> > -        } else if (port->chassis && !get_localnet_port(
> > -                local_datapaths, mc->datapath->tunnel_key)) {
> > +        } else if (!get_localnet_port(local_datapaths,
> > +                                      mc->datapath->tunnel_key)) {
> >              /* Add remote chassis only when localnet port not exist,
> >               * otherwise multicast will reach remote ports through localnet
> >               * port. */
> > -            if (chassis_is_vtep(port->chassis)) {
> > -                sset_add(&vtep_chassis, port->chassis->name);
> > -            } else {
> > -                sset_add(&remote_chassis, port->chassis->name);
> > +            if (port->chassis) {
> > +                if (chassis_is_vtep(port->chassis)) {
> > +                    sset_add(&vtep_chassis, port->chassis->name);
> > +                } else {
> > +                    sset_add(&remote_chassis, port->chassis->name);
> > +                }
> > +            }
> > +            for (int j = 0; j < port->n_additional_chassis; j++) {
> > +                if (chassis_is_vtep(port->additional_chassis[j])) {
> > +                    sset_add(&vtep_chassis,
> > +                             port->additional_chassis[j]->name);
> > +                } else {
> > +                    sset_add(&remote_chassis,
> > +                             port->additional_chassis[j]->name);
> > +                }
> >              }
> >          }
> >      }
> > diff --git a/ovn-nb.xml b/ovn-nb.xml
> > index 66bedda33..c197f431f 100644
> > --- a/ovn-nb.xml
> > +++ b/ovn-nb.xml
> > @@ -1034,6 +1034,15 @@
> >              main chassis and the rest are one or more additional chassis that
> >              are allowed to bind the same port.
> >            </p>
> > +
> > +          <p>
> > +            When multiple chassis are set for the port, and the logical switch
> > +            is connected to an external network through a <code>localnet</code>
> > +            port, tunneling is enforced for the port to guarantee delivery of
> > +            packets directed to the port to all its locations. This has MTU
> > +            implications because the network used for tunneling must have MTU
> > +            larger than <code>localnet</code> for stable connectivity.
> > +          </p>
> >          </column>
> >
> >          <column name="options" key="iface-id-ver">
> > diff --git a/ovn-sb.xml b/ovn-sb.xml
> > index e231da302..2dc0d5bea 100644
> > --- a/ovn-sb.xml
> > +++ b/ovn-sb.xml
> > @@ -3343,6 +3343,15 @@ tcp.flags = RST;
> >            chassis and the rest are one or more additional chassis that are
> >            allowed to bind the same port.
> >          </p>
> > +
> > +        <p>
> > +          When multiple chassis are set for the port, and the logical switch
> > +          is connected to an external network through a <code>localnet</code>
> > +          port, tunneling is enforced for the port to guarantee delivery of
> > +          packets directed to the port to all its locations. This has MTU
> > +          implications because the network used for tunneling must have MTU
> > +          larger than <code>localnet</code> for stable connectivity.
> > +        </p>
> >        </column>
> >
> >        <column name="options" key="iface-id-ver">
> > diff --git a/tests/ovn.at b/tests/ovn.at
> > index a9b623d1d..c75705e0c 100644
> > --- a/tests/ovn.at
> > +++ b/tests/ovn.at
> > @@ -14218,6 +14218,699 @@ OVN_CLEANUP([hv1],[hv2],[hv3])
> >  AT_CLEANUP
> >  ])
> >
> > +OVN_FOR_EACH_NORTHD([
> > +AT_SETUP([basic connectivity with multiple requested-chassis])
> > +ovn_start
> > +
> > +net_add n1
> > +for i in 1 2 3; do
> > +    sim_add hv$i
> > +    as hv$i
> > +    check ovs-vsctl add-br br-phys
> > +    ovn_attach n1 br-phys 192.168.0.$i
> > +done
> > +
> > +# Disable local ARP responder to pass ARP requests through tunnels
> > +check ovn-nbctl ls-add ls0 -- add Logical_Switch ls0 other_config vlan-passthru=true
> > +
> > +check ovn-nbctl lsp-add ls0 first
> > +check ovn-nbctl lsp-add ls0 second
> > +check ovn-nbctl lsp-add ls0 third
> > +check ovn-nbctl lsp-add ls0 migrator
> > +check ovn-nbctl lsp-set-addresses first "00:00:00:00:00:01 10.0.0.1"
> > +check ovn-nbctl lsp-set-addresses second "00:00:00:00:00:02 10.0.0.2"
> > +check ovn-nbctl lsp-set-addresses third "00:00:00:00:00:03 10.0.0.3"
> > +check ovn-nbctl lsp-set-addresses migrator "00:00:00:00:00:ff 10.0.0.100"
> > +
> > +# The test scenario will migrate Migrator port between hv1 and hv2 and check
> > +# that connectivity to and from the port is functioning properly for both
> > +# chassis locations. Connectivity will be checked for resources located at hv1
> > +# (First) and hv2 (Second) as well as for hv3 (Third) that does not take part
> > +# in port migration.
> > +check ovn-nbctl lsp-set-options first requested-chassis=hv1
> > +check ovn-nbctl lsp-set-options second requested-chassis=hv2
> > +check ovn-nbctl lsp-set-options third requested-chassis=hv3
> > +
> > +as hv1 check ovs-vsctl -- add-port br-int first -- \
> > +    set Interface first external-ids:iface-id=first \
> > +    options:tx_pcap=hv1/first-tx.pcap \
> > +    options:rxq_pcap=hv1/first-rx.pcap
> > +as hv2 check ovs-vsctl -- add-port br-int second -- \
> > +    set Interface second external-ids:iface-id=second \
> > +    options:tx_pcap=hv2/second-tx.pcap \
> > +    options:rxq_pcap=hv2/second-rx.pcap
> > +as hv3 check ovs-vsctl -- add-port br-int third -- \
> > +    set Interface third external-ids:iface-id=third \
> > +    options:tx_pcap=hv3/third-tx.pcap \
> > +    options:rxq_pcap=hv3/third-rx.pcap
> > +
> > +# Create Migrator interfaces on both hv1 and hv2
> > +for hv in hv1 hv2; do
> > +    as $hv check ovs-vsctl -- add-port br-int migrator -- \
> > +        set Interface migrator external-ids:iface-id=migrator \
> > +        options:tx_pcap=$hv/migrator-tx.pcap \
> > +        options:rxq_pcap=$hv/migrator-rx.pcap
> > +done
> > +
> > +send_arp() {
> > +    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
> > +    local request=${eth_dst}${eth_src}08060001080006040001${eth_src}${spa}${eth_dst}${tpa}
> > +    as ${hv} ovs-appctl netdev-dummy/receive $inport $request
> > +    echo "${request}"
> > +}
> > +
> > +reset_pcap_file() {
> > +    local hv=$1
> > +    local iface=$2
> > +    local pcap_file=$3
> > +    as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
> > +                                                   options:rxq_pcap=dummy-rx.pcap
> > +    check rm -f ${pcap_file}*.pcap
> > +    as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
> > +                                                   options:rxq_pcap=${pcap_file}-rx.pcap
> > +}
> > +
> > +reset_env() {
> > +    reset_pcap_file hv1 first hv1/first
> > +    reset_pcap_file hv2 second hv2/second
> > +    reset_pcap_file hv3 third hv3/third
> > +    reset_pcap_file hv1 migrator hv1/migrator
> > +    reset_pcap_file hv2 migrator hv2/migrator
> > +
> > +    for port in hv1/migrator hv2/migrator hv1/first hv2/second hv3/third; do
> > +        : > $port.expected
> > +    done
> > +}
> > +
> > +check_packets() {
> > +    OVN_CHECK_PACKETS([hv1/migrator-tx.pcap], [hv1/migrator.expected])
> > +    OVN_CHECK_PACKETS([hv2/migrator-tx.pcap], [hv2/migrator.expected])
> > +    OVN_CHECK_PACKETS([hv1/first-tx.pcap], [hv1/first.expected])
> > +    OVN_CHECK_PACKETS([hv2/second-tx.pcap], [hv2/second.expected])
> > +    OVN_CHECK_PACKETS([hv3/third-tx.pcap], [hv3/third.expected])
> > +}
> > +
> > +migrator_tpa=$(ip_to_hex 10 0 0 100)
> > +first_spa=$(ip_to_hex 10 0 0 1)
> > +second_spa=$(ip_to_hex 10 0 0 2)
> > +third_spa=$(ip_to_hex 10 0 0 3)
> > +
> > +for hv in hv1 hv2 hv3; do
> > +    wait_row_count Chassis 1 name=$hv
> > +done
> > +hv1_uuid=$(fetch_column Chassis _uuid name=hv1)
> > +hv2_uuid=$(fetch_column Chassis _uuid name=hv2)
> > +
> > +# Start with Migrator on hv1 but not hv2
> > +check ovn-nbctl lsp-set-options migrator requested-chassis=hv1
> > +wait_for_ports_up
> > +wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
> > +wait_column "$hv1_uuid" Port_Binding requested_chassis 
> > logical_port=migrator
> > +wait_column "" Port_Binding additional_chassis logical_port=migrator
> > +wait_column "" Port_Binding requested_additional_chassis 
> > logical_port=migrator
> > +wait_for_ports_up
> > +
> > +reset_env
> > +
> > +OVN_POPULATE_ARP
> > +
> > +# check that...
> > +# unicast from First arrives to hv1:Migrator
> > +# unicast from First doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +
> > +# mcast from First arrives to hv1:Migrator
> > +# mcast from First doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from Second arrives to hv1:Migrator
> > +# unicast from Second doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +
> > +# mcast from Second arrives to hv1:Migrator
> > +# mcast from Second doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from Third arrives to hv1:Migrator
> > +# unicast from Third doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +
> > +# mcast from Third arrives to hv1:Migrator
> > +# mcast from Third doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +
> > +# unicast from hv1:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +echo $request >> hv2/second.expected
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from hv2:Migrator doesn't arrive to First, Second, or Third
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +
> > +# mcast from hv1:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# mcast from hv2:Migrator doesn't arrive to First, Second, or Third
> > +request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +
> > +check_packets
> > +reset_env
> > +
> > +# Start port migration hv1 -> hv2: both hypervisors are now bound
> > +check ovn-nbctl lsp-set-options migrator requested-chassis=hv1,hv2
> > +wait_for_ports_up
> > +wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
> > +wait_column "$hv1_uuid" Port_Binding requested_chassis 
> > logical_port=migrator
> > +wait_column "$hv2_uuid" Port_Binding additional_chassis 
> > logical_port=migrator
> > +wait_column "$hv2_uuid" Port_Binding requested_additional_chassis 
> > logical_port=migrator
> > +
> > +# check that...
> > +# unicast from First arrives to hv1:Migrator
> > +# unicast from First arrives to hv2:Migrator
> > +request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from First arrives to hv1:Migrator
> > +# mcast from First arrives to hv2:Migrator
> > +request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv3/third.expected
> > +echo $request >> hv2/second.expected
> > +
> > +# unicast from Second arrives to hv1:Migrator
> > +# unicast from Second arrives to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from Second arrives to hv1:Migrator
> > +# mcast from Second arrives to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv3/third.expected
> > +echo $request >> hv1/first.expected
> > +
> > +# unicast from Third arrives to hv1:Migrator binding
> > +# unicast from Third arrives to hv2:Migrator binding
> > +request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from Third arrives to hv1:Migrator
> > +# mcast from Third arrives to hv2:Migrator
> > +request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +
> > +# unicast from hv1:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +echo $request >> hv2/second.expected
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from hv2:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +echo $request >> hv2/second.expected
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +echo $request >> hv3/third.expected
> > +
> > +# mcast from hv1:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# mcast from hv2:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +check_packets
> > +reset_env
> > +
> > +# Complete migration: destination is bound
> > +check ovn-nbctl lsp-set-options migrator requested-chassis=hv2
> > +wait_for_ports_up
> > +wait_column "$hv2_uuid" Port_Binding chassis logical_port=migrator
> > +wait_column "$hv2_uuid" Port_Binding requested_chassis 
> > logical_port=migrator
> > +wait_column "" Port_Binding additional_chassis logical_port=migrator
> > +wait_column "" Port_Binding requested_additional_chassis 
> > logical_port=migrator
> > +
> > +# check that...
> > +# unicast from Third doesn't arrive to hv1:Migrator
> > +# unicast from Third arrives to hv2:Migrator
> > +request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from Third doesn't arrive to hv1:Migrator
> > +# mcast from Third arrives to hv2:Migrator
> > +request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +
> > +# unicast from First doesn't arrive to hv1:Migrator
> > +# unicast from First arrives to hv2:Migrator
> > +request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from First doesn't arrive to hv1:Migrator
> > +# mcast from First arrives to hv2:Migrator binding
> > +request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from Second doesn't arrive to hv1:Migrator
> > +# unicast from Second arrives to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from Second doesn't arrive to hv1:Migrator
> > +# mcast from Second arrives to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from hv1:Migrator doesn't arrive to First, Second, or Third
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +
> > +# unicast from hv2:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +echo $request >> hv2/second.expected
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +echo $request >> hv3/third.expected
> > +
> > +# mcast from hv1:Migrator doesn't arrive to First, Second, or Third
> > +request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +
> > +# mcast from hv2:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +check_packets
> > +
> > +OVN_CLEANUP([hv1],[hv2],[hv3])
> > +
> > +AT_CLEANUP
> > +])
> > +
> > +OVN_FOR_EACH_NORTHD([
> > +AT_SETUP([localnet connectivity with multiple requested-chassis])
> > +ovn_start
> > +
> > +net_add n1
> > +for i in 1 2 3; do
> > +    sim_add hv$i
> > +    as hv$i
> > +    check ovs-vsctl add-br br-phys
> > +    ovn_attach n1 br-phys 192.168.0.$i
> > +    check ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys
> > +done
> > +
> > +# Disable local ARP responder to pass ARP requests through tunnels
> > +check ovn-nbctl ls-add ls0 -- add Logical_Switch ls0 other_config vlan-passthru=true
> > +
> > +check ovn-nbctl lsp-add ls0 first
> > +check ovn-nbctl lsp-add ls0 second
> > +check ovn-nbctl lsp-add ls0 third
> > +check ovn-nbctl lsp-add ls0 migrator
> > +check ovn-nbctl lsp-set-addresses first "00:00:00:00:00:01 10.0.0.1"
> > +check ovn-nbctl lsp-set-addresses second "00:00:00:00:00:02 10.0.0.2"
> > +check ovn-nbctl lsp-set-addresses third "00:00:00:00:00:03 10.0.0.3"
> > +check ovn-nbctl lsp-set-addresses migrator "00:00:00:00:00:ff 10.0.0.100"
> > +
> > +check ovn-nbctl lsp-add ls0 public
> > +check ovn-nbctl lsp-set-type public localnet
> > +check ovn-nbctl lsp-set-addresses public unknown
> > +check ovn-nbctl lsp-set-options public network_name=phys
> > +
> > +# The test scenario will migrate Migrator port between hv1 and hv2 and check
> > +# that connectivity to and from the port is functioning properly for both
> > +# chassis locations. Connectivity will be checked for resources located at hv1
> > +# (First) and hv2 (Second) as well as for hv3 (Third) that does not take part
> > +# in port migration.
> > +check ovn-nbctl lsp-set-options first requested-chassis=hv1
> > +check ovn-nbctl lsp-set-options second requested-chassis=hv2
> > +check ovn-nbctl lsp-set-options third requested-chassis=hv3
> > +
> > +as hv1 check ovs-vsctl -- add-port br-int first -- \
> > +    set Interface first external-ids:iface-id=first \
> > +    options:tx_pcap=hv1/first-tx.pcap \
> > +    options:rxq_pcap=hv1/first-rx.pcap
> > +as hv2 check ovs-vsctl -- add-port br-int second -- \
> > +    set Interface second external-ids:iface-id=second \
> > +    options:tx_pcap=hv2/second-tx.pcap \
> > +    options:rxq_pcap=hv2/second-rx.pcap
> > +as hv3 check ovs-vsctl -- add-port br-int third -- \
> > +    set Interface third external-ids:iface-id=third \
> > +    options:tx_pcap=hv3/third-tx.pcap \
> > +    options:rxq_pcap=hv3/third-rx.pcap
> > +
> > +# Create Migrator interfaces on both hv1 and hv2
> > +for hv in hv1 hv2; do
> > +    as $hv check ovs-vsctl -- add-port br-int migrator -- \
> > +        set Interface migrator external-ids:iface-id=migrator \
> > +        options:tx_pcap=$hv/migrator-tx.pcap \
> > +        options:rxq_pcap=$hv/migrator-rx.pcap
> > +done
> > +
> > +send_arp() {
> > +    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
> > +    local request=${eth_dst}${eth_src}08060001080006040001${eth_src}${spa}${eth_dst}${tpa}
> > +    as ${hv} ovs-appctl netdev-dummy/receive $inport $request
> > +    echo "${request}"
> > +}
> > +
> > +send_garp() {
> > +    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
> > +    local request=${eth_dst}${eth_src}08060001080006040002${eth_src}${spa}${eth_dst}${tpa}
> > +    as ${hv} ovs-appctl netdev-dummy/receive $inport $request
> > +    echo "${request}"
> > +}
> > +
> > +reset_pcap_file() {
> > +    local hv=$1
> > +    local iface=$2
> > +    local pcap_file=$3
> > +    as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
> > +                                                   options:rxq_pcap=dummy-rx.pcap
> > +    check rm -f ${pcap_file}*.pcap
> > +    as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
> > +                                                   options:rxq_pcap=${pcap_file}-rx.pcap
> > +}
> > +
> > +reset_env() {
> > +    reset_pcap_file hv1 first hv1/first
> > +    reset_pcap_file hv2 second hv2/second
> > +    reset_pcap_file hv3 third hv3/third
> > +    reset_pcap_file hv1 migrator hv1/migrator
> > +    reset_pcap_file hv2 migrator hv2/migrator
> > +
> > +    for port in hv1/migrator hv2/migrator hv1/first hv2/second hv3/third; do
> > +        : > $port.expected
> > +    done
> > +}
> > +
> > +check_packets() {
> > +    # the test scenario gets spurious garps generated by vifs because of localnet
> > +    # attachment, hence using CONTAIN instead of strict matching
> > +    OVN_CHECK_PACKETS_CONTAIN([hv1/migrator-tx.pcap], [hv1/migrator.expected])
> > +    OVN_CHECK_PACKETS_CONTAIN([hv2/migrator-tx.pcap], [hv2/migrator.expected])
> > +    OVN_CHECK_PACKETS_CONTAIN([hv1/first-tx.pcap], [hv1/first.expected])
> > +    OVN_CHECK_PACKETS_CONTAIN([hv2/second-tx.pcap], [hv2/second.expected])
> > +    OVN_CHECK_PACKETS_CONTAIN([hv3/third-tx.pcap], [hv3/third.expected])
> > +}
> > +
> > +migrator_tpa=$(ip_to_hex 10 0 0 100)
> > +first_spa=$(ip_to_hex 10 0 0 1)
> > +second_spa=$(ip_to_hex 10 0 0 2)
> > +third_spa=$(ip_to_hex 10 0 0 3)
> > +
> > +for hv in hv1 hv2 hv3; do
> > +    wait_row_count Chassis 1 name=$hv
> > +done
> > +hv1_uuid=$(fetch_column Chassis _uuid name=hv1)
> > +hv2_uuid=$(fetch_column Chassis _uuid name=hv2)
> > +
> > +OVN_POPULATE_ARP
> > +
> > +# Start with Migrator on hv1 but not hv2
> > +check ovn-nbctl lsp-set-options migrator requested-chassis=hv1
> > +wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
> > +wait_column "$hv1_uuid" Port_Binding requested_chassis 
> > logical_port=migrator
> > +wait_column "" Port_Binding additional_chassis logical_port=migrator
> > +wait_column "" Port_Binding requested_additional_chassis 
> > logical_port=migrator
> > +wait_for_ports_up
> > +
> > +# advertise location of ports through localnet port
> > +send_garp hv1 migrator 0000000000ff ffffffffffff $migrator_spa $migrator_tpa
> > +send_garp hv1 first 000000000001 ffffffffffff $first_spa $first_tpa
> > +send_garp hv2 second 000000000002 ffffffffffff $second_spa $second_tpa
> > +send_garp hv3 third 000000000003 ffffffffffff $third_spa $third_tpa
> > +reset_env
> > +
> > +# check that...
> > +# unicast from First arrives to hv1:Migrator
> > +# unicast from First doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +
> > +# mcast from First arrives to hv1:Migrator
> > +# mcast from First doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from Second arrives to hv1:Migrator
> > +# unicast from Second doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +
> > +# mcast from Second arrives to hv1:Migrator
> > +# mcast from Second doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from Third arrives to hv1:Migrator
> > +# unicast from Third doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +
> > +# mcast from Third arrives to hv1:Migrator
> > +# mcast from Third doesn't arrive to hv2:Migrator
> > +request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +
> > +# unicast from hv1:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +echo $request >> hv2/second.expected
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from hv2:Migrator doesn't arrive to First, Second, or Third
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +
> > +# mcast from hv1:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# mcast from hv2:Migrator doesn't arrive to First, Second, or Third
> > +request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +
> > +check_packets
> > +reset_env
> > +
> > +# Start port migration hv1 -> hv2: both hypervisors are now bound
> > +check ovn-nbctl lsp-set-options migrator requested-chassis=hv1,hv2
> > +wait_for_ports_up
> > +wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
> > +wait_column "$hv1_uuid" Port_Binding requested_chassis 
> > logical_port=migrator
> > +wait_column "$hv2_uuid" Port_Binding additional_chassis 
> > logical_port=migrator
> > +wait_column "$hv2_uuid" Port_Binding requested_additional_chassis 
> > logical_port=migrator
> > +
> > +# check that...
> > +# unicast from First arrives to hv1:Migrator
> > +# unicast from First arrives to hv2:Migrator
> > +request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from First arrives to hv1:Migrator
> > +# mcast from First arrives to hv2:Migrator
> > +request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv3/third.expected
> > +echo $request >> hv2/second.expected
> > +
> > +# unicast from Second arrives to hv1:Migrator
> > +# unicast from Second arrives to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from Second arrives to hv1:Migrator
> > +# mcast from Second arrives to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv3/third.expected
> > +echo $request >> hv1/first.expected
> > +
> > +# unicast from Third arrives to hv1:Migrator binding
> > +# unicast from Third arrives to hv2:Migrator binding
> > +request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from Third arrives to hv1:Migrator
> > +# mcast from Third arrives to hv2:Migrator
> > +request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
> > +echo $request >> hv1/migrator.expected
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +
> > +# unicast from hv1:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +echo $request >> hv2/second.expected
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from hv2:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +echo $request >> hv2/second.expected
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +echo $request >> hv3/third.expected
> > +
> > +# mcast from hv1:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# mcast from hv2:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +check_packets
> > +
> > +# Complete migration: destination is bound
> > +check ovn-nbctl lsp-set-options migrator requested-chassis=hv2
> > +wait_column "$hv2_uuid" Port_Binding chassis logical_port=migrator
> > +wait_column "$hv2_uuid" Port_Binding requested_chassis 
> > logical_port=migrator
> > +wait_column "" Port_Binding additional_chassis logical_port=migrator
> > +wait_column "" Port_Binding requested_additional_chassis 
> > logical_port=migrator
> > +wait_for_ports_up
> > +
> > +check ovn-nbctl --wait=hv sync
> > +sleep 1
> > +
> > +# advertise new location of the port through localnet port
> > +send_garp hv2 migrator 0000000000ff ffffffffffff $migrator_spa $migrator_tpa
> > +reset_env
> > +
> > +# check that...
> > +# unicast from Third doesn't arrive to hv1:Migrator
> > +# unicast from Third arrives to hv2:Migrator
> > +request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from Third doesn't arrive to hv1:Migrator
> > +# mcast from Third arrives to hv2:Migrator
> > +request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +
> > +# unicast from First doesn't arrive to hv1:Migrator
> > +# unicast from First arrives to hv2:Migrator
> > +request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from First doesn't arrive to hv1:Migrator
> > +# mcast from First arrives to hv2:Migrator binding
> > +request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from Second doesn't arrive to hv1:Migrator
> > +# unicast from Second arrives to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +
> > +# mcast from Second doesn't arrive to hv1:Migrator
> > +# mcast from Second arrives to hv2:Migrator
> > +request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
> > +echo $request >> hv2/migrator.expected
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv3/third.expected
> > +
> > +# unicast from hv1:Migrator doesn't arrive to First, Second, or Third
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +
> > +# unicast from hv2:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
> > +echo $request >> hv2/second.expected
> > +request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
> > +echo $request >> hv3/third.expected
> > +
> > +# mcast from hv1:Migrator doesn't arrive to First, Second, or Third
> > +request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +
> > +# mcast from hv2:Migrator arrives to First, Second, and Third
> > +request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
> > +echo $request >> hv1/first.expected
> > +echo $request >> hv2/second.expected
> > +echo $request >> hv3/third.expected
> > +
> > +check_packets
> > +
> > +OVN_CLEANUP([hv1],[hv2],[hv3])
> > +
> > +AT_CLEANUP
> > +])
> > +
> >  OVN_FOR_EACH_NORTHD([
> >  AT_SETUP([options:requested-chassis for logical port])
> >  ovn_start
> > --
> > 2.34.1
> >
> > _______________________________________________
> > dev mailing list
> > [email protected]
> > https://mail.openvswitch.org/mailman/listinfo/ovs-dev
> >
_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev
