On Tue, Dec 5, 2023 at 11:32 AM Dumitru Ceara <[email protected]> wrote:
> On 12/5/23 10:10, Ales Musil wrote:
> > Add unixctl command called "ofctrl/flow-install-time"
> > that returns the last time it took OvS to process
> > and install all flows. The initial time is taken right
> > before controller queues the updates to rconn.
> > The end is marked when we receive barrier reply with
> > corresponding xid.
> >
> > Reported-at: https://issues.redhat.com/browse/FDP-134
> > Signed-off-by: Ales Musil <[email protected]>
> > ---
> >  controller/ofctrl.c | 31 +++++++++++++++++++++++++++++--
> >  1 file changed, 29 insertions(+), 2 deletions(-)
>
> Hi Ales,
>
> Thanks for the patch!
>

Hi Dumitru,

thank you for the review.

> This is supposed to be a rather "stable" command as the CMS wants to
> rely on this to gather relevant load statistics. We should make sure we
> document its behavior in ovn-controller.8.xml and mention it in the NEWS
> file.
>

You are right, it should definitely be documented. (A short usage sketch
is appended at the end of this mail.)

> > diff --git a/controller/ofctrl.c b/controller/ofctrl.c
> > index 7aac0128b..4f8b80012 100644
> > --- a/controller/ofctrl.c
> > +++ b/controller/ofctrl.c
> > @@ -51,6 +51,7 @@
> >  #include "openvswitch/rconn.h"
> >  #include "socket-util.h"
> >  #include "timeval.h"
> > +#include "unixctl.h"
> >  #include "util.h"
> >  #include "vswitch-idl.h"
> >
> > @@ -323,6 +324,7 @@ struct ofctrl_flow_update {
> >      struct ovs_list list_node; /* In 'flow_updates'. */
> >      ovs_be32 xid;              /* OpenFlow transaction ID for barrier. */
> >      uint64_t req_cfg;          /* Requested sequence number. */
> > +    long long start_msec;      /* Timestamp when the update started. */
> >  };
> >
> >  static struct ofctrl_flow_update *
> > @@ -402,6 +404,10 @@ static enum mf_field_id mff_ovn_geneve;
> >   * (e.g. after OVS restart). */
> >  static bool ofctrl_initial_clear;
> >
> > +/* The time in ms it took for the last flow installation to be processed
> > + * by OvS. */
> > +static long long last_flow_install_time_ms = 0;
> > +
> >  static ovs_be32 queue_msg(struct ofpbuf *);
> >
> >  static struct ofpbuf *encode_flow_mod(struct ofputil_flow_mod *);
> > @@ -413,6 +419,8 @@ static struct ofpbuf *encode_meter_mod(const struct ofputil_meter_mod *);
> >  static void ovn_installed_flow_table_clear(void);
> >  static void ovn_installed_flow_table_destroy(void);
> >
> > +static void flow_install_time_report(struct unixctl_conn *conn, int argc,
> > +                                     const char *argv[], void *param);
> >
> >  static void ofctrl_recv(const struct ofp_header *, enum ofptype);
> >
> > @@ -429,6 +437,8 @@ ofctrl_init(struct ovn_extend_table *group_table,
> >      groups = group_table;
> >      meters = meter_table;
> >      shash_init(&meter_bands);
> > +    unixctl_command_register("ofctrl/flow-install-time", "", 0, 0,
> > +                             flow_install_time_report, NULL);
> >  }
> >
> >  /* S_NEW, for a new connection.
> > @@ -732,6 +742,9 @@ recv_S_UPDATE_FLOWS(const struct ofp_header *oh, enum ofptype type,
> >          if (fup->req_cfg >= cur_cfg) {
> >              cur_cfg = fup->req_cfg;
> >          }
> > +
> > +        last_flow_install_time_ms = time_msec() - fup->start_msec;
>
> Nit: very unlikely but this can underflow.
>

As discussed offline, "very unlikely" is in this case closer to almost
impossible.

> Nit2: too many empty lines IMO. I'd just move this line immediately
> before the "if (fup->req_cfg >= cur_cfg) {".
>

Done in v2.
>
> > +
> >          mem_stats.oflow_update_usage -= ofctrl_flow_update_size(fup);
> >          ovs_list_remove(&fup->list_node);
> >          free(fup);
> > @@ -2900,6 +2913,7 @@ ofctrl_put(struct ovn_desired_flow_table *lflow_table,
> >          const struct ofp_header *oh = barrier->data;
> >          ovs_be32 xid_ = oh->xid;
> >          ovs_list_push_back(&msgs, &barrier->list_node);
> > +        long long flow_update_start = time_msec();
> >
> >          /* Queue the messages. */
> >          struct ofpbuf *msg;
> > @@ -2945,9 +2959,13 @@ ofctrl_put(struct ovn_desired_flow_table *lflow_table,
> >
> >              /* Add a flow update. */
> >              fup = xmalloc(sizeof *fup);
> > +            *fup = (struct ofctrl_flow_update) {
> > +                .xid = xid_,
> > +                .req_cfg = req_cfg,
> > +                .start_msec = flow_update_start,
> > +            };
> > +
> >              ovs_list_push_back(&flow_updates, &fup->list_node);
> > -            fup->xid = xid_;
> > -            fup->req_cfg = req_cfg;
> >              mem_stats.oflow_update_usage += ofctrl_flow_update_size(fup);
> >  done:;
> >      } else if (!ovs_list_is_empty(&flow_updates)) {
> > @@ -3090,3 +3108,12 @@ ofctrl_get_memory_usage(struct simap *usage)
> >                     ROUND_UP(rconn_packet_counter_n_bytes(tx_counter), 1024)
> >                     / 1024);
> >  }
> > +
> > +static void
> > +flow_install_time_report(struct unixctl_conn *conn, int argc OVS_UNUSED,
> > +                         const char *argv[] OVS_UNUSED, void *param OVS_UNUSED)
> > +{
> > +    char *msg = xasprintf("%lld ms", last_flow_install_time_ms);
> > +    unixctl_command_reply(conn, msg);
> > +    free(msg);
> > +}
>
> Thanks,
> Dumitru
>

Thanks,
Ales

--
Ales Musil
Senior Software Engineer - OVN Core
Red Hat EMEA <https://www.redhat.com>
[email protected]
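
A short usage sketch, as mentioned above. The command name and the
"%lld ms" reply format come straight from the patch; the ovn-appctl
invocation and the sample value below are only illustrative, not output
from an actual run:

    $ ovn-appctl -t ovn-controller ofctrl/flow-install-time
    156 ms

The reported value is the interval between queueing the flow updates to
rconn and receiving the matching barrier reply for the most recent
installation, so a CMS polling this command sees only the latest
measurement, not an average.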
