Modified ovs-appctl dpctl/dump-flows command to output
the miniflow bits for a given flow when -m option is passed.
$ ovs-appctl dpctl/dump-flows -m
Signed-off-by: Emma Finn <emma.finn@intel.com>
---
RFC -> v1
* Changed revision from RFC to v1
* Reformatted based on comments
* Fixed same classifier being dumped multiple times
flagged by Ilya
* Fixed print of subtables flagged by William
* Updated print count of bits as well as bits themselves
---
v1 -> v2
* Reformatted based on comments
* Refactored code to make output part of ovs-appctl
dpctl/dump-flows -m command.
---
NEWS | 2 ++
lib/dpctl.c | 4
lib/dpif-netdev.c | 43 +--
lib/dpif.c | 9 +
lib/dpif.h | 2 ++
5 files changed, 54 insertions(+), 6 deletions(-)
diff --git a/NEWS b/NEWS
index 965faca..1c9d2db 100644
--- a/NEWS
+++ b/NEWS
@@ -8,6 +8,8 @@ Post-v2.12.0
* Add option to enable, disable and query TCP sequence checking in
conntrack.
* Add support for conntrack zone limits.
+ * Command "ovs-appctl dpctl/dump-flows" refactored to show subtable
+ miniflow bits for userspace datapath.
- AF_XDP:
* New option 'use-need-wakeup' for netdev-afxdp to control enabling
of corresponding 'need_wakeup' flag in AF_XDP rings. Enabled by default
diff --git a/lib/dpctl.c b/lib/dpctl.c
index a1ea25b..9242407 100644
--- a/lib/dpctl.c
+++ b/lib/dpctl.c
@@ -825,6 +825,10 @@ format_dpif_flow(struct ds *ds, const struct dpif_flow *f, struct hmap *ports,
}
ds_put_cstr(ds, ", actions:");
format_odp_actions(ds, f->actions, f->actions_len, ports);
+if (dpctl_p->verbosity && f->attrs.subtable) {
+ds_put_cstr(ds, ", dp-extra-info:");
+dpif_flow_attrs_format(&f->attrs, ds);
+}
}
struct dump_types {
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 079bd1b..314d3cb 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -867,6 +867,8 @@ static inline bool
pmd_perf_metrics_enabled(const struct dp_netdev_pmd_thread *pmd);
static void queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_flow *flow);
+static struct dpcls_subtable *get_subtable(struct dp_netdev_pmd_thread *,
+ const struct dp_netdev_flow *);
static void
emc_cache_init(struct emc_cache *flow_cache)
@@ -3054,10 +3056,14 @@ get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
* 'mask_buf'. Actions will be returned without copying, by relying on RCU to
* protect them. */
static void
-dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
+dp_netdev_flow_to_dpif_flow(struct dp_netdev *dp,
+const struct dp_netdev_flow *netdev_flow,
struct ofpbuf *key_buf, struct ofpbuf *mask_buf,
struct dpif_flow *flow, bool terse)
{
+struct dp_netdev_pmd_thread *pmd_thread;
+struct dpcls_subtable *subtable;
+
if (terse) {
memset(flow, 0, sizeof *flow);
} else {
@@ -3101,6 +3107,10 @@ dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
flow->attrs.offloaded = false;
flow->attrs.dp_layer = "ovs";
+
+pmd_thread = dp_netdev_get_pmd(dp, flow->pmd_id);
+subtable = get_subtable(pmd_thread, netdev_flow);
+flow->attrs.subtable = subtable;
}
static int
@@ -3203,8 +3213,8 @@ dpif_netdev_flow_get(const struct dpif *dpif, const struct dpif_flow_get *get)
netdev_flow = dp_netdev_pmd_find_flow(pmd, get->ufid, get->key,
get->key_len);
if (netdev_flow) {
-dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->buffer,
-get->flow, false);
+dp_netdev_flow_to_dpif_flow(dp, netdev_flow, get->buffer,
+get->buffer, get->flow, false);
error = 0;
break;
} else {
@@ -3634,11 +3644,11 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
struct dp_netdev_flow *netdev_flows[FLOW_DUMP_MAX_BATCH];
int n_flows = 0;
int i;
+struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
+struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
ovs_mutex_lock(&dump->mutex);
if (!dump->status) {
-struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
-struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
struct dp_netdev_pmd_thread *pmd = dump->cur_pmd;
int flow_limit = MIN(max_flows, FLOW_DUMP_MAX_BATCH);
@@ -3695,7 +3705,7 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
-dp_netdev_flow_to_dpif_flow(netdev_flow, &key, &mask, f,
+dp_netdev_flow_to_dpif_flow(dp, netdev_flow, &key, &mask, f,