In large-scale scenarios this option hugely reduces the size of the
Southbound database, positively affecting end-to-end performance.  In
such scenarios there's no real reason to ever disable datapath groups.
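
As a rough, illustrative check on a running deployment (assuming a
Southbound database reachable via ovn-sbctl; not part of this patch),
something along these lines shows the grouping effect:

  # Count Logical_Flow rows; with grouping, shared flows appear only once.
  ovn-sbctl --columns=_uuid list Logical_Flow | grep -c _uuid
  # Inspect the datapath groups and the datapaths each one covers.
  ovn-sbctl list Logical_DP_Group

Flows that are identical across datapaths reference a single
Logical_DP_Group row instead of being duplicated per datapath.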

In lower-scale scenarios any potential overhead due to logical datapath
groups is very likely negligible.

Aside from potential scalability concerns, the
NB.NB_Global.options:use_logical_dp_groups knob was kept until now so
that, in case of a bug in the logical datapath groups code, a CMS could
turn it off and fall back to the mode in which logical flows are not
grouped together.  As far as I know, this has never happened.
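
For reference, the knob being dropped is the one a CMS would have set
with a command like the following (after this patch the option is simply
ignored):

  ovn-nbctl set NB_Global . options:use_logical_dp_groups=false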

Moreover, datapath groups have been enabled by default since v21.09.0 (4 stable
releases ago), via 90daa7ce18dc ("northd: Enable logical dp groups by
default.").

From a testing perspective, removing this knob will halve the CI matrix.
This is desirable, especially in the context of more tests being added,
e.g.:

https://patchwork.ozlabs.org/project/ovn/patch/20220531093318.2270409-1-mh...@redhat.com/

Signed-off-by: Dumitru Ceara <dce...@redhat.com>
---
 NEWS                |  1 +
 northd/northd.c     | 77 +++++++++++--------------------------
 ovn-nb.xml          | 19 +++-------
 tests/ovn-macros.at | 23 ++----------
 tests/ovn-northd.at | 92 ++++-----------------------------------------
 tests/ovs-macros.at |  4 +-
 6 files changed, 41 insertions(+), 175 deletions(-)

diff --git a/NEWS b/NEWS
index e015ae8e7..20b4c5d91 100644
--- a/NEWS
+++ b/NEWS
@@ -4,6 +4,7 @@ Post v22.06.0
     "ovn-encap-df_default" to enable or disable tunnel DF flag.
   - Add option "localnet_learn_fdb" to LSP that will allow localnet
     ports to learn MAC addresses and store them in FDB table.
+  - Removed the possibility of disabling logical datapath groups.
 
 OVN v22.06.0 - XX XXX XXXX
 --------------------------
diff --git a/northd/northd.c b/northd/northd.c
index 0207f6ce1..a97a321cd 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -4813,10 +4813,6 @@ ovn_lflow_equal(const struct ovn_lflow *a, const struct ovn_datapath *od,
             && nullable_string_is_equal(a->ctrl_meter, ctrl_meter));
 }
 
-/* If this option is 'true' northd will combine logical flows that differ by
- * logical datapath only by creating a datapath group. */
-static bool use_logical_dp_groups = false;
-
 enum {
     STATE_NULL,               /* parallelization is off */
     STATE_INIT_HASH_SIZES,    /* parallelization is on; hashes sizing needed */
@@ -4841,8 +4837,7 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
     lflow->ctrl_meter = ctrl_meter;
     lflow->dpg = NULL;
     lflow->where = where;
-    if ((parallelization_state != STATE_NULL)
-        && use_logical_dp_groups) {
+    if (parallelization_state != STATE_NULL) {
         ovs_mutex_init(&lflow->odg_lock);
     }
 }
@@ -4852,7 +4847,7 @@ ovn_dp_group_add_with_reference(struct ovn_lflow *lflow_ref,
                                 struct ovn_datapath *od)
                                 OVS_NO_THREAD_SAFETY_ANALYSIS
 {
-    if (!use_logical_dp_groups || !lflow_ref) {
+    if (!lflow_ref) {
         return false;
     }
 
@@ -4931,13 +4926,11 @@ do_ovn_lflow_add(struct hmap *lflow_map, struct ovn_datapath *od,
     struct ovn_lflow *old_lflow;
     struct ovn_lflow *lflow;
 
-    if (use_logical_dp_groups) {
-        old_lflow = ovn_lflow_find(lflow_map, NULL, stage, priority, match,
-                                   actions, ctrl_meter, hash);
-        if (old_lflow) {
-            ovn_dp_group_add_with_reference(old_lflow, od);
-            return old_lflow;
-        }
+    old_lflow = ovn_lflow_find(lflow_map, NULL, stage, priority, match,
+                               actions, ctrl_meter, hash);
+    if (old_lflow) {
+        ovn_dp_group_add_with_reference(old_lflow, od);
+        return old_lflow;
     }
 
     lflow = xmalloc(sizeof *lflow);
@@ -4993,8 +4986,7 @@ ovn_lflow_add_at_with_hash(struct hmap *lflow_map, struct ovn_datapath *od,
     struct ovn_lflow *lflow;
 
     ovs_assert(ovn_stage_to_datapath_type(stage) == ovn_datapath_get_type(od));
-    if (use_logical_dp_groups
-        && (parallelization_state == STATE_USE_PARALLELIZATION)) {
+    if (parallelization_state == STATE_USE_PARALLELIZATION) {
         lflow = do_ovn_lflow_add_pd(lflow_map, od, hash, stage, priority,
                                     match, actions, io_port, stage_hint, where,
                                     ctrl_meter);
@@ -5080,8 +5072,7 @@ static void
 ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow *lflow)
 {
     if (lflow) {
-        if ((parallelization_state != STATE_NULL)
-            && use_logical_dp_groups) {
+        if (parallelization_state != STATE_NULL) {
             ovs_mutex_destroy(&lflow->odg_lock);
         }
         if (lflows) {
@@ -13883,24 +13874,15 @@ build_lswitch_and_lrouter_flows(const struct hmap *datapaths,
         int index;
 
         lsiv = xcalloc(sizeof(*lsiv), build_lflows_pool->size);
-        if (use_logical_dp_groups) {
-            lflow_segs = NULL;
-        } else {
-            lflow_segs = xcalloc(sizeof(*lflow_segs), build_lflows_pool->size);
-        }
+        lflow_segs = NULL;
 
         /* Set up "work chunks" for each thread to work on. */
 
         for (index = 0; index < build_lflows_pool->size; index++) {
-            if (use_logical_dp_groups) {
-                /* if dp_groups are in use we lock a shared lflows hash
-                 * on a per-bucket level instead of merging hash frags */
-                lsiv[index].lflows = lflows;
-            } else {
-                fast_hmap_init(&lflow_segs[index], lflows->mask);
-                lsiv[index].lflows = &lflow_segs[index];
-            }
-
+            /* dp_groups are in use so we lock a shared lflows hash
+             * on a per-bucket level.
+             */
+            lsiv[index].lflows = lflows;
             lsiv[index].datapaths = datapaths;
             lsiv[index].ports = ports;
             lsiv[index].port_groups = port_groups;
@@ -13919,13 +13901,8 @@ build_lswitch_and_lrouter_flows(const struct hmap *datapaths,
         }
 
         /* Run thread pool. */
-        if (use_logical_dp_groups) {
-            run_pool_callback(build_lflows_pool, NULL, NULL,
-                              noop_callback);
-            fix_flow_map_size(lflows, lsiv, build_lflows_pool->size);
-        } else {
-            run_pool_hash(build_lflows_pool, lflows, lflow_segs);
-        }
+        run_pool_callback(build_lflows_pool, NULL, NULL, noop_callback);
+        fix_flow_map_size(lflows, lsiv, build_lflows_pool->size);
 
         for (index = 0; index < build_lflows_pool->size; index++) {
             ds_destroy(&lsiv[index].match);
@@ -14077,21 +14054,15 @@ void run_update_worker_pool(int n_threads)
             lflow_hash_lock_destroy();
             parallelization_state = STATE_NULL;
         } else if (parallelization_state != STATE_USE_PARALLELIZATION) {
-            if (use_logical_dp_groups) {
-                lflow_hash_lock_init();
-                parallelization_state = STATE_INIT_HASH_SIZES;
-            } else {
-                parallelization_state = STATE_USE_PARALLELIZATION;
-            }
+            lflow_hash_lock_init();
+            parallelization_state = STATE_INIT_HASH_SIZES;
         }
     }
 }
 
-static void worker_pool_init_for_ldp(void)
+static void init_worker_pool(void)
 {
-    /* If parallelization is enabled, make sure locks are initialized
-     * when ldp are used.
-     */
+    /* If parallelization is enabled, make sure locks are initialized. */
     if (parallelization_state != STATE_NULL) {
         lflow_hash_lock_init();
         parallelization_state = STATE_INIT_HASH_SIZES;
@@ -15361,13 +15332,6 @@ ovnnb_db_run(struct northd_input *input_data,
         nbrec_nb_global_set_options(nb, &options);
     }
 
-    bool old_use_ldp = use_logical_dp_groups;
-    use_logical_dp_groups = smap_get_bool(&nb->options,
-                                          "use_logical_dp_groups", true);
-    if (use_logical_dp_groups && !old_use_ldp) {
-        worker_pool_init_for_ldp();
-    }
-
     use_ct_inv_match = smap_get_bool(&nb->options,
                                      "use_ct_inv_match", true);
 
@@ -15666,6 +15630,7 @@ void northd_run(struct northd_input *input_data,
                 struct ovsdb_idl_txn *ovnsb_txn)
 {
     stopwatch_start(OVNNB_DB_RUN_STOPWATCH_NAME, time_msec());
+    init_worker_pool();
     ovnnb_db_run(input_data, data, ovnnb_txn, ovnsb_txn,
                  input_data->sbrec_chassis_by_name,
                  input_data->sbrec_chassis_by_hostname);
diff --git a/ovn-nb.xml b/ovn-nb.xml
index 618a48b97..dcc75ea73 100644
--- a/ovn-nb.xml
+++ b/ovn-nb.xml
@@ -196,21 +196,14 @@
 
       <column name="options" key="use_logical_dp_groups">
         <p>
-          If set to <code>true</code>, <code>ovn-northd</code> will combine
-          logical flows that differs only by logical datapath into a single
-          logical flow with logical datapath group attached.
+          Note: This option is deprecated; the only behavior is to always
+          combine logical flows using logical datapath groups.  Changing the
+          value or removing this option altogether has no effect.
         </p>
         <p>
-          While this should significantly reduce number of logical flows stored
-          in Southbound database this could also increase processing complexity
-          on the <code>ovn-controller</code> side, e.g.,
-          <code>ovn-controller</code> will re-consider logical flow for all
-          logical datapaths in a group.  If the option set to
-          <code>false</code>, there will be separate logical flow per logical
-          datapath and only this flow will be re-considered.
-        </p>
-        <p>
-          The default value is <code>false</code>.
+          <code>ovn-northd</code> combines logical flows that differ
+          only by logical datapath into a single logical flow with a
+          logical datapath group attached.
         </p>
       </column>
       <column name="options" key="use_parallel_build">
diff --git a/tests/ovn-macros.at b/tests/ovn-macros.at
index c6f0f6251..89f432f16 100644
--- a/tests/ovn-macros.at
+++ b/tests/ovn-macros.at
@@ -246,12 +246,6 @@ ovn_start () {
                --ic-nb-db=unix:"$ovs_base"/ovn-ic-nb/ovn-ic-nb.sock \
                --ic-sb-db=unix:"$ovs_base"/ovn-ic-sb/ovn-ic-sb.sock
     fi
-
-    if test X$NORTHD_USE_DP_GROUPS = Xyes; then
-        ovn-nbctl set NB_Global . options:use_logical_dp_groups=true
-    else
-        ovn-nbctl set NB_Global . options:use_logical_dp_groups=false
-    fi
 }
 
 # Interconnection networks.
@@ -749,21 +743,12 @@ m4_define([OVN_POPULATE_ARP], [AT_CHECK(ovn_populate_arp__, [0], [ignore])])
 # datapath groups.
 m4_define([OVN_FOR_EACH_NORTHD],
   [m4_foreach([NORTHD_TYPE], [ovn-northd, ovn-northd-ddlog],
-     [m4_foreach([NORTHD_USE_DP_GROUPS], [yes, no],
-       [m4_foreach([NORTHD_USE_PARALLELIZATION], [yes, no], [$1
-])])])])
-
-# Some tests aren't prepared for dp groups to be enabled.
-m4_define([OVN_FOR_EACH_NORTHD_WITHOUT_DP_GROUPS],
-  [m4_foreach([NORTHD_TYPE], [ovn-northd, ovn-northd-ddlog],
-     [m4_foreach([NORTHD_USE_DP_GROUPS], [no],
-       [m4_foreach([NORTHD_USE_PARALLELIZATION], [yes, no], [$1
-])])])])
+     [m4_foreach([NORTHD_USE_PARALLELIZATION], [yes, no], [$1
+])])])
 
 # Some tests aren't prepared for ddlog to be enabled.
 m4_define([OVN_FOR_EACH_NORTHD_WITHOUT_DDLOG],
   [m4_foreach([NORTHD_TYPE], [ovn-northd],
-     [m4_foreach([NORTHD_USE_DP_GROUPS], [yes, no],
-       [m4_foreach([NORTHD_USE_PARALLELIZATION], [yes, no], [$1
-])])])])
+     [m4_foreach([NORTHD_USE_PARALLELIZATION], [yes, no], [$1
+])])])
 
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index a94a7d441..e6a2e9d83 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -755,7 +755,7 @@ wait_row_count Datapath_Binding 2
 AT_CLEANUP
 ])
 
-OVN_FOR_EACH_NORTHD_WITHOUT_DP_GROUPS([
+OVN_FOR_EACH_NORTHD([
 AT_SETUP([northbound database reconnection])
 
 ovn_start --backup-northd=none
@@ -765,7 +765,7 @@ ovn_start --backup-northd=none
 check_row_count Datapath_Binding 0
 check ovn-nbctl --wait=sb ls-add sw0
 check_row_count Datapath_Binding 1
-lf=$(count_rows Logical_Flow)
+dp1=$(fetch_column Datapath_Binding _uuid external_ids:name=sw0)
 
 # Make nbdb ovsdb-server drop connection from ovn-northd.
 conn=$(as ovn-nb ovs-appctl -t ovsdb-server ovsdb-server/list-remotes|grep ^punix)
@@ -777,7 +777,6 @@ check as ovn-nb ovs-appctl -t ovsdb-server ovsdb-server/add-remote "$conn2"
 check ovn-nbctl --db="${conn2#p}" ls-add sw1
 sleep 5
 check_row_count Datapath_Binding 1
-check_row_count Logical_Flow $lf
 
 # Now re-enable the nbdb connection and observe ovn-northd catch up.
 #
@@ -786,12 +785,13 @@ check_row_count Logical_Flow $lf
 # differently on reconnection.
 check as ovn-nb ovs-appctl -t ovsdb-server ovsdb-server/add-remote "$conn"
 wait_row_count Datapath_Binding 2
-wait_row_count Logical_Flow $(expr 2 \* $lf)
+dp2=$(fetch_column Datapath_Binding _uuid external_ids:name=sw1)
+wait_column "$dp1 $dp2" Logical_DP_Group datapaths
 
 AT_CLEANUP
 ])
 
-OVN_FOR_EACH_NORTHD_WITHOUT_DP_GROUPS([
+OVN_FOR_EACH_NORTHD([
 AT_SETUP([southbound database reconnection])
 
 ovn_start --backup-northd=none
@@ -801,7 +801,7 @@ ovn_start --backup-northd=none
 check_row_count Datapath_Binding 0
 check ovn-nbctl --wait=sb ls-add sw0
 check_row_count Datapath_Binding 1
-lf=$(count_rows Logical_Flow)
+dp1=$(fetch_column Datapath_Binding _uuid external_ids:name=sw0)
 
 # Make sbdb ovsdb-server drop connection from ovn-northd.
 conn=$(as ovn-sb ovs-appctl -t ovsdb-server ovsdb-server/list-remotes|grep ^punix)
@@ -813,7 +813,6 @@ check as ovn-sb ovs-appctl -t ovsdb-server ovsdb-server/add-remote "$conn2"
 check ovn-nbctl ls-add sw1
 sleep 5
 OVN_SB_DB=${conn2#p} check_row_count Datapath_Binding 1
-OVN_SB_DB=${conn2#p} check_row_count Logical_Flow $lf
 
 # Now re-enable the sbdb connection and observe ovn-northd catch up.
 #
@@ -822,7 +821,8 @@ OVN_SB_DB=${conn2#p} check_row_count Logical_Flow $lf
 # differently on reconnection.
 check as ovn-sb ovs-appctl -t ovsdb-server ovsdb-server/add-remote "$conn"
 wait_row_count Datapath_Binding 2
-wait_row_count Logical_Flow $(expr 2 \* $lf)
+dp2=$(fetch_column Datapath_Binding _uuid external_ids:name=sw1)
+wait_column "$dp1 $dp2" Logical_DP_Group datapaths
 
 AT_CLEANUP
 ])
@@ -2648,82 +2648,6 @@ AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_hairpin | sort | sed 's/table=..
 AT_CLEANUP
 ])
 
-OVN_FOR_EACH_NORTHD([
-AT_SETUP([logical gatapath groups])
-AT_KEYWORDS([use_logical_dp_groups])
-ovn_start
-
-dnl Disabling datapath groups.
-ovn-nbctl --wait=sb set NB_Global . options:use_logical_dp_groups=false
-
-ovn-nbctl ls-add sw1
-ovn-nbctl ls-add sw2
-ovn-nbctl lsp-add sw1 swp1
-ovn-nbctl lsp-add sw2 swp2
-ovn-nbctl --wait=sb sync
-
-sw1_sb_uuid=$(fetch_column datapath_binding _uuid external_ids:name=sw1)
-sw2_sb_uuid=$(fetch_column datapath_binding _uuid external_ids:name=sw2)
-
-dnl Check that we have no datapath groups.
-check_row_count Logical_DP_Group 0
-
-dnl Number of logical flows that depends on logical switch or multicast group.
-dnl These will not be combined.
-n_flows_specific=$(ovn-sbctl -f table find Logical_Flow | grep -cE 'swp')
-echo "Number of specific flows: "${n_flows_specific}
-
-dnl Both logical switches configured identically, so there should be same
-dnl number of logical flows per logical switch/logical datapath.
-n_flows=$(count_rows Logical_Flow)
-echo "Total number of flows with datapath groups disabled: "${n_flows}
-n_flows_half=$((${n_flows} / 2))
-check_row_count Logical_Flow ${n_flows_half} logical_datapath=${sw1_sb_uuid}
-check_row_count Logical_Flow ${n_flows_half} logical_datapath=${sw2_sb_uuid}
-
-dnl Enabling datapath groups.
-ovn-nbctl --wait=sb set NB_Global . options:use_logical_dp_groups=true
-
-dnl Check that one datapath group created.
-check_row_count Logical_DP_Group 1
-dp_group_uuid=$(fetch_column logical_dp_group _uuid)
-
-dnl Check that datapath group contains both datapaths.
-check_column "${sw1_sb_uuid} ${sw2_sb_uuid}" Logical_DP_Group datapaths
-
-dnl Calculating number of flows that should be combined for a datapath group.
-n_flows=$(count_rows Logical_Flow)
-echo "Total number of flows with datapath groups enabled: "${n_flows}
-n_flows_common=$((${n_flows} - ${n_flows_specific}))
-
-check_row_count Logical_Flow ${n_flows_common} logical_dp_group=${dp_group_uuid}
-check_row_count Logical_Flow ${n_flows_common} logical_datapath=[[]]
-check_row_count Logical_Flow ${n_flows_common} \
-    logical_dp_group=${dp_group_uuid} logical_datapath=[[]]
-
-dnl Adding 8 more logical switches and ports.
-for i in $(seq 3 10); do
-    ovn-nbctl ls-add sw${i}
-    ovn-nbctl lsp-add sw${i} swp${i}
-done
-ovn-nbctl --wait=sb sync
-
-dnl Number of logical flows should be increased only due to specific flows.
-expected_n_flows=$((${n_flows_common} + 5 * ${n_flows_specific}))
-echo "Total number of flows with 10 logical switches should be: " \
-     ${expected_n_flows}
-check_row_count Logical_Flow ${expected_n_flows}
-
-dnl Should be still only one datapath group.
-check_row_count Logical_DP_Group 1
-dp_group_uuid=$(fetch_column logical_dp_group _uuid)
-
-dnl Number of common flows should be the same.
-check_row_count Logical_Flow ${n_flows_common} logical_dp_group=${dp_group_uuid}
-
-AT_CLEANUP
-])
-
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([Router policies - ECMP reroute])
 AT_KEYWORDS([router policies ecmp reroute])
diff --git a/tests/ovs-macros.at b/tests/ovs-macros.at
index 09c05cf16..5d325559c 100644
--- a/tests/ovs-macros.at
+++ b/tests/ovs-macros.at
@@ -10,11 +10,9 @@ dnl   set it as a shell variable as well.
 dnl - Skip the test if it's for ovn-northd-ddlog but it didn't get built.
 m4_rename([AT_SETUP], [OVS_AT_SETUP])
 m4_define([AT_SETUP],
-  [OVS_AT_SETUP($@[]m4_ifdef([NORTHD_TYPE], [ -- NORTHD_TYPE])[]m4_ifdef([NORTHD_USE_DP_GROUPS], [ -- dp-groups=NORTHD_USE_DP_GROUPS])[]m4_ifdef([NORTHD_USE_PARALLELIZATION], [ -- parallelization=NORTHD_USE_PARALLELIZATION]))
+  [OVS_AT_SETUP($@[]m4_ifdef([NORTHD_TYPE], [ -- NORTHD_TYPE])[]m4_ifdef([NORTHD_USE_PARALLELIZATION], [ -- parallelization=NORTHD_USE_PARALLELIZATION]))
 m4_ifdef([NORTHD_TYPE], [[NORTHD_TYPE]=NORTHD_TYPE
 ])dnl
-m4_ifdef([NORTHD_USE_DP_GROUPS], [[NORTHD_USE_DP_GROUPS]=NORTHD_USE_DP_GROUPS
-])dnl
 m4_ifdef([NORTHD_USE_PARALLELIZATION], [[NORTHD_USE_PARALLELIZATION]=NORTHD_USE_PARALLELIZATION
 ])dnl
 m4_ifdef([NORTHD_DUMMY_NUMA], [[NORTHD_DUMMY_NUMA]=NORTHD_DUMMY_NUMA
-- 
2.31.1
