---
Version 2:
  - Storing the error code from netdev_flow_get() so that the cached
    entry mimics the actual result of the last call.
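
For reviewers, here is the idea reduced to a self-contained sketch outside
of the OVS tree.  All names below (flow_get_stats(), hw_flow_get(),
port_mutex, cached_*) are made up for illustration, and plain pthreads /
C11 atomics stand in for the OVS wrappers (ovs_mutex_trylock(),
atomic_store_relaxed()).  On mutex contention the reader does not block;
it replays the stored statistics and the stored error code of the last
real call:

#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Cached copy of the last real query, shared between threads. */
static pthread_mutex_t port_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int cached_result = EAGAIN;  /* Error code of the last call. */
static atomic_uint_least64_t cached_packets;
static atomic_uint_least64_t cached_bytes;

/* Stand-in for the expensive query that requires 'port_mutex'. */
static int
hw_flow_get(uint64_t *packets, uint64_t *bytes)
{
    *packets = 42;
    *bytes = 4200;
    return 0;
}

/* Returns 0 on success and fills in the counters, either fresh (lock
 * acquired) or replayed from the cache (lock contended). */
static int
flow_get_stats(uint64_t *packets, uint64_t *bytes)
{
    int ret;

    if (!pthread_mutex_trylock(&port_mutex)) {
        /* Lock acquired: do the real query and refresh the cache,
         * storing the error code as well, so that a later contended
         * call mimics the result of this one. */
        ret = hw_flow_get(packets, bytes);
        atomic_store_explicit(&cached_result, ret, memory_order_relaxed);
        if (!ret) {
            atomic_store_explicit(&cached_packets, *packets,
                                  memory_order_relaxed);
            atomic_store_explicit(&cached_bytes, *bytes,
                                  memory_order_relaxed);
        }
        pthread_mutex_unlock(&port_mutex);
    } else {
        /* Contended, e.g. another thread holds the mutex during
         * reconfiguration.  Do not wait (that is the deadlock described
         * in the XXX comment below); replay the last stored result. */
        ret = atomic_load_explicit(&cached_result, memory_order_relaxed);
        if (!ret) {
            *packets = atomic_load_explicit(&cached_packets,
                                            memory_order_relaxed);
            *bytes = atomic_load_explicit(&cached_bytes,
                                          memory_order_relaxed);
        }
    }
    return ret;
}

int
main(void)
{
    uint64_t packets, bytes;

    if (!flow_get_stats(&packets, &bytes)) {
        printf("packets=%" PRIu64 ", bytes=%" PRIu64 "\n", packets, bytes);
    }
    return 0;
}

The sketch compiles standalone with 'cc -pthread sketch.c'.  Relaxed
ordering mirrors what the patch uses: each field is read only for
reporting, so slight staleness is acceptable.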
 AUTHORS.rst       |  1 +
 lib/dpif-netdev.c | 93 ++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 88 insertions(+), 6 deletions(-)
diff --git a/AUTHORS.rst b/AUTHORS.rst
index 8e6a0769f..eb36a01d0 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -504,6 +504,7 @@ Edwin Chiu ec...@vmware.com
 Eivind Bulie Haanaes
 Enas Ahmad enas.ah...@kaust.edu.sa
 Eric Lopez
+Frank Wang (王培辉) wangpei...@inspur.com
 Frido Roose fr.ro...@gmail.com
 Gaetano Catalli gaetano.cata...@gmail.com
 Gavin Remaley gavin_rema...@selinc.com
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 5bb392cba..2aad24511 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -492,6 +492,12 @@ struct dp_netdev_flow_stats {
     atomic_uint16_t tcp_flags; /* Bitwise-OR of seen tcp_flags values. */
 };
+/* Contained by struct dp_netdev_flow's 'last_attrs' member. */
+struct dp_netdev_flow_attrs {
+    atomic_bool offloaded;         /* True if flow is offloaded to HW. */
+    ATOMIC(const char *) dp_layer; /* DP layer the flow is handled in. */
+};
+
 /* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
  *
  *
@@ -552,6 +558,11 @@ struct dp_netdev_flow {
     /* Statistics. */
     struct dp_netdev_flow_stats stats;

+    /* Statistics and attributes received from the netdev offload provider. */
+    atomic_int netdev_flow_get_result;
+    struct dp_netdev_flow_stats last_stats;
+    struct dp_netdev_flow_attrs last_attrs;
+
     /* Actions. */
     OVSRCU_TYPE(struct dp_netdev_actions *) actions;
@@ -3277,9 +3288,56 @@ dp_netdev_pmd_find_flow(const struct dp_netdev_pmd_thread *pmd,
     return NULL;
 }
+static void
+dp_netdev_flow_set_last_stats_attrs(struct dp_netdev_flow *netdev_flow,
+                                    const struct dpif_flow_stats *stats,
+                                    const struct dpif_flow_attrs *attrs,
+                                    int result)
+{
+    struct dp_netdev_flow_stats *last_stats = &netdev_flow->last_stats;
+    struct dp_netdev_flow_attrs *last_attrs = &netdev_flow->last_attrs;
+
+    atomic_store_relaxed(&netdev_flow->netdev_flow_get_result, result);
+    if (result) {
+        return;
+    }
+
+    atomic_store_relaxed(&last_stats->used, stats->used);
+    atomic_store_relaxed(&last_stats->packet_count, stats->n_packets);
+    atomic_store_relaxed(&last_stats->byte_count, stats->n_bytes);
+    atomic_store_relaxed(&last_stats->tcp_flags, stats->tcp_flags);
+
+    atomic_store_relaxed(&last_attrs->offloaded, attrs->offloaded);
+    atomic_store_relaxed(&last_attrs->dp_layer, attrs->dp_layer);
+}
+
+static void
+dp_netdev_flow_get_last_stats_attrs(struct dp_netdev_flow *netdev_flow,
+                                    struct dpif_flow_stats *stats,
+                                    struct dpif_flow_attrs *attrs,
+                                    int *result)
+{
+    struct dp_netdev_flow_stats *last_stats = &netdev_flow->last_stats;
+    struct dp_netdev_flow_attrs *last_attrs = &netdev_flow->last_attrs;
+
+    atomic_read_relaxed(&netdev_flow->netdev_flow_get_result, result);
+    if (*result) {
+        return;
+    }
+
+    atomic_read_relaxed(&last_stats->used, &stats->used);
+    atomic_read_relaxed(&last_stats->packet_count, &stats->n_packets);
+    atomic_read_relaxed(&last_stats->byte_count, &stats->n_bytes);
+    atomic_read_relaxed(&last_stats->tcp_flags, &stats->tcp_flags);
+
+    atomic_read_relaxed(&last_attrs->offloaded, &attrs->offloaded);
+    atomic_read_relaxed(&last_attrs->dp_layer, &attrs->dp_layer);
+}
+
 static bool
 dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
-                                    const struct dp_netdev_flow *netdev_flow,
+                                    struct dp_netdev_flow *netdev_flow,
                                     struct dpif_flow_stats *stats,
                                     struct dpif_flow_attrs *attrs)
 {
@@ -3302,11 +3360,31 @@ dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
     }
     ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
     /* Taking a global 'port_mutex' to fulfill thread safety
-     * restrictions for the netdev-offload-dpdk module. */
-    ovs_mutex_lock(&dp->port_mutex);
-    ret = netdev_flow_get(netdev, &match, &actions, &netdev_flow->mega_ufid,
-                          stats, attrs, &buf);
-    ovs_mutex_unlock(&dp->port_mutex);
+     * restrictions for the netdev-offload-dpdk module.
+     *
+     * XXX: Main thread will try to pause/stop all revalidators during datapath
+     *      reconfiguration via datapath purge callback (dp_purge_cb) while
+     *      holding 'dp->port_mutex'.  So we're not waiting for the mutex here.
+     *      Otherwise, deadlock is possible, because revalidators might sleep
+     *      waiting for the main thread to release the lock, while the main
+     *      thread will wait for them to stop processing.
+     *      This workaround might make statistics less accurate, especially
+     *      for the flow deletion case, since there will be no other attempt. */
+    if (!ovs_mutex_trylock(&dp->port_mutex)) {
+        ret = netdev_flow_get(netdev, &match, &actions,
+                              &netdev_flow->mega_ufid, stats, attrs, &buf);
+        /* Storing statistics and attributes from the last request for
+         * later use on mutex contention. */
+        dp_netdev_flow_set_last_stats_attrs(netdev_flow, stats, attrs, ret);
+        ovs_mutex_unlock(&dp->port_mutex);
+    } else {
+        dp_netdev_flow_get_last_stats_attrs(netdev_flow, stats, attrs, &ret);
+        if (!ret && !attrs->dp_layer) {
+            /* Flow was never reported as 'offloaded' so it's harmless
+             * to continue to think so. */
+            ret = EAGAIN;
+        }
+    }
     netdev_close(netdev);
     if (ret) {
         return false;
@@ -3575,6 +3653,9 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
     /* Do not allocate extra space. */
     flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
     memset(&flow->stats, 0, sizeof flow->stats);
+    atomic_init(&flow->netdev_flow_get_result, 0);
+    memset(&flow->last_stats, 0, sizeof flow->last_stats);
+    memset(&flow->last_attrs, 0, sizeof flow->last_attrs);
     flow->dead = false;
     flow->batch = NULL;
     flow->mark = INVALID_FLOW_MARK;