Commit a48c85b2d6 (revalidator: Use xcache when revalidation is
required.) introduced a bug where side effects cached by xlate_actions
in the old xcache would still be performed when revalidation was
required. This could cause stale MAC learning entries to reappear and,
in some cases, learnt flows to be re-learnt even after the parent flow
was deleted.

This patch splits the attribution of stats from the execution of side
effects for the flow: stats are always attributed to the rules that
created them, while side effects are executed selectively, based on the
validity of the flow.
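
The xcache entry types thus split into two groups: pure accounting
entries, which only credit stats to the objects that processed the
packets, and entries with side effects, which modify state elsewhere
(flow mods installed by learn actions, MAC learning for the NORMAL
action, and FIN timeout adjustments). The hypothetical helper below only
illustrates that partition; the patch itself open-codes it in the two
switch statements in ofproto-dpif-xlate.c.

    /* Illustration only -- this helper is not part of the patch. */
    static bool
    xc_entry_has_side_effects(enum xc_type type)
    {
        switch (type) {
        case XC_LEARN:        /* ofproto_dpif_flow_mod() of a learned flow. */
        case XC_NORMAL:       /* MAC learning table update. */
        case XC_FIN_TIMEOUT:  /* Reduces the rule's timeouts. */
            return true;
        case XC_RULE:
        case XC_BOND:
        case XC_NETDEV:
        case XC_NETFLOW:
        case XC_MIRROR:
        case XC_GROUP:        /* Stats accounting only. */
            return false;
        default:
            OVS_NOT_REACHED();
        }
    }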

This handles three cases in slightly different ways (a condensed sketch
of the resulting revalidate_ukey() logic follows the list):
* If revalidation is unnecessary, attribute stats and perform side
  effects using the existing xcache.
* If revalidation is necessary and the flow is still valid, attribute
  stats using the existing xcache, then perform full revalidation and
  execute side effects based on the newly built xcache.
* If revalidation is necessary and the flow is invalid, attribute stats
  using the old xcache and do not perform side effects.
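
Condensed, the control flow in revalidate_ukey() then looks roughly like
the sketch below (error paths, netflow accounting, and the actions/mask
comparison against the installed flow are elided; see the
ofproto-dpif-upcall.c hunks for the real code):

    may_learn = push.n_packets > 0;
    if (ukey->xcache) {
        /* Stats always go to the rules recorded in the old xcache; its
         * side effects run only when no revalidation is pending. */
        xlate_push_stats(ukey->xcache,
                         may_learn && !udpif->need_revalidate, &push);
        if (!udpif->need_revalidate) {
            return true;                    /* Case 1: done. */
        }
        xlate_cache_clear(ukey->xcache);    /* Cases 2 and 3: start over. */
        push.n_packets = 0;
        push.n_bytes = 0;
    }

    xlate_actions(&xin, &xout);             /* Repopulates ukey->xcache. */

    /* ... if the new translation no longer matches the installed flow,
     *     return false without performing side effects (case 3) ... */

    if (may_learn) {
        xlate_push_effects(ukey->xcache, &push);    /* Case 2. */
    }
    return true;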

VMware-BZ: #1268574

Reported-by: Len Gao <l...@vmware.com>
Signed-off-by: Joe Stringer <joestrin...@nicira.com>
---
 ofproto/ofproto-dpif-upcall.c |   10 ++++--
 ofproto/ofproto-dpif-xlate.c  |   73 +++++++++++++++++++++++++++++++----------
 ofproto/ofproto-dpif-xlate.h  |    3 +-
 3 files changed, 65 insertions(+), 21 deletions(-)

diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
index b38f226..281955f 100644
--- a/ofproto/ofproto-dpif-upcall.c
+++ b/ofproto/ofproto-dpif-upcall.c
@@ -1148,12 +1148,14 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
 
     may_learn = push.n_packets > 0;
     if (ukey->xcache) {
-        xlate_push_stats(ukey->xcache, may_learn, &push);
+        /* Defer side-effects if the xcache might be out of date. */
+        bool execute_side_effects = may_learn && !udpif->need_revalidate;
+
+        xlate_push_stats(ukey->xcache, execute_side_effects, &push);
         if (udpif->need_revalidate) {
             xlate_cache_clear(ukey->xcache);
             push.n_packets = 0;
             push.n_bytes = 0;
-            may_learn = false;
         } else {
             ok = true;
             goto exit;
@@ -1173,7 +1175,6 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
     xlate_in_init(&xin, ofproto, &flow, NULL, push.tcp_flags, NULL);
     xin.resubmit_stats = push.n_packets ? &push : NULL;
     xin.xcache = ukey->xcache;
-    xin.may_learn = may_learn;
     xin.skip_wildcards = !udpif->need_revalidate;
     xlate_actions(&xin, &xout);
     xoutp = &xout;
@@ -1214,6 +1215,9 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
         }
     }
     ok = true;
+    if (may_learn) {
+        xlate_push_effects(ukey->xcache, &push);
+    }
 
 exit:
     if (netflow) {
diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index 71eaad1..ca1b62e 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -338,6 +338,8 @@ static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
 
 static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
                                               enum xc_type type);
+static void xlate_push_stats__(struct xlate_cache *,
+                               const struct dpif_flow_stats *);
 static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
 static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
 static void xlate_xport_init(struct xlate_cfg *, struct xport *);
@@ -3886,10 +3888,46 @@ xlate_cache_normal(struct ofproto_dpif *ofproto, struct flow *flow, int vlan)
     update_learning_table(xbridge, flow, &wc, vlan, xbundle);
 }
 
-/* Push stats and perform side effects of flow translation. */
+/* Perform side effects of flow translation. */
 void
-xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
-                 const struct dpif_flow_stats *stats)
+xlate_push_effects(struct xlate_cache *xcache,
+                   const struct dpif_flow_stats *stats)
+{
+    struct xc_entry *entry;
+    struct ofpbuf entries = xcache->entries;
+
+    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+        switch (entry->type) {
+        case XC_RULE:
+        case XC_BOND:
+        case XC_NETDEV:
+        case XC_NETFLOW:
+        case XC_MIRROR:
+        case XC_GROUP:
+            /* Handled by xlate_push_stats__(). */
+            break;
+
+        case XC_LEARN:
+            ofproto_dpif_flow_mod(entry->u.learn.ofproto, entry->u.learn.fm);
+            break;
+        case XC_NORMAL:
+            xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
+                               entry->u.normal.vlan);
+            break;
+        case XC_FIN_TIMEOUT:
+            xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
+                                entry->u.fin.idle, entry->u.fin.hard);
+            break;
+        default:
+            OVS_NOT_REACHED();
+        }
+    }
+}
+
+/* Push stats using the cache provided. */
+static void
+xlate_push_stats__(struct xlate_cache *xcache,
+                   const struct dpif_flow_stats *stats)
 {
     struct xc_entry *entry;
     struct ofpbuf entries = xcache->entries;
@@ -3915,30 +3953,31 @@ xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
                                 entry->u.mirror.mirrors,
                                 stats->n_packets, stats->n_bytes);
             break;
-        case XC_LEARN:
-            if (may_learn) {
-                ofproto_dpif_flow_mod(entry->u.learn.ofproto,
-                                      entry->u.learn.fm);
-            }
-            break;
-        case XC_NORMAL:
-            xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
-                               entry->u.normal.vlan);
-            break;
-        case XC_FIN_TIMEOUT:
-            xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
-                                entry->u.fin.idle, entry->u.fin.hard);
-            break;
         case XC_GROUP:
             group_dpif_credit_stats(entry->u.group.group, entry->u.group.bucket,
                                     stats);
             break;
+        case XC_LEARN:
+        case XC_NORMAL:
+        case XC_FIN_TIMEOUT:
+            /* Handled by xlate_push_effects(). */
+            break;
         default:
             OVS_NOT_REACHED();
         }
     }
 }
 
+void
+xlate_push_stats(struct xlate_cache *xcache, bool execute_side_effects,
+                 const struct dpif_flow_stats *stats)
+{
+    xlate_push_stats__(xcache, stats);
+    if (execute_side_effects) {
+        xlate_push_effects(xcache, stats);
+    }
+}
+
 static void
 xlate_dev_unref(struct xc_entry *entry)
 {
diff --git a/ofproto/ofproto-dpif-xlate.h b/ofproto/ofproto-dpif-xlate.h
index 6065db3..5d40203 100644
--- a/ofproto/ofproto-dpif-xlate.h
+++ b/ofproto/ofproto-dpif-xlate.h
@@ -182,8 +182,9 @@ void xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src);
 int xlate_send_packet(const struct ofport_dpif *, struct ofpbuf *);
 
 struct xlate_cache *xlate_cache_new(void);
-void xlate_push_stats(struct xlate_cache *, bool may_learn,
+void xlate_push_stats(struct xlate_cache *, bool execute_side_effects,
                       const struct dpif_flow_stats *);
+void xlate_push_effects(struct xlate_cache *, const struct dpif_flow_stats *);
 void xlate_cache_clear(struct xlate_cache *);
 void xlate_cache_delete(struct xlate_cache *);
 
-- 
1.7.10.4
