On Mon, Jun 23, 2014 at 12:36:11PM -0700, Ryan Wilson wrote:
> Previously, flows were retrieved one by one when dumping flows for
> datapaths of type 'netdev'. This increased contention for the dump's
> mutex, negatively affecting revalidator performance.
> 
> This patch retrieves batches of flows when dumping flows for datapaths
> of type 'netdev'.
> 
> Signed-off-by: Ryan Wilson <wr...@nicira.com>
> ---
> v2: Addressed Joe's comments, split into 2 patches
> v3: Optimized patch by allocating memory in 1 malloc call instead of 3,
> reducing time spent in the locked state. Only one patch is needed now.
> v4: Minor fixes.

Thanks Ryan!

I folded in this change to ensure that the caller could specify any
'max_flows' that it likes.  It doesn't make sense for the caller to
have to be aware of the implementation's limitations:

@@ -1484,12 +1484,11 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
     int n_flows = 0;
     int i;
 
-    ovs_assert(max_flows <= FLOW_DUMP_MAX_BATCH);
-
     ovs_mutex_lock(&dump->mutex);
     if (!dump->status) {
         fat_rwlock_rdlock(&dp->cls.rwlock);
-        for (n_flows = 0; n_flows < max_flows; n_flows++) {
+        for (n_flows = 0; n_flows < MIN(max_flows, FLOW_DUMP_MAX_BATCH);
+             n_flows++) {
             struct hmap_node *node;
 
             node = hmap_at_position(&dp->flow_table, &dump->bucket,

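As an aside, for anyone reading along: the effect of the clamp is that a
caller can pass an arbitrarily large 'max_flows' and each call is silently
capped at the internal batch size.  A minimal standalone sketch of the
pattern (hypothetical names, not the actual dpif code):

    #include <stdio.h>

    #define INTERNAL_BATCH_MAX 50   /* plays the role of FLOW_DUMP_MAX_BATCH */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Fills 'items' with up to 'max_items' results and returns how many
     * were produced.  The MIN() clamp hides the internal batch limit
     * from the caller. */
    static int
    dump_next(int *items, int max_items)
    {
        int n;

        for (n = 0; n < MIN(max_items, INTERNAL_BATCH_MAX); n++) {
            items[n] = n;               /* stand-in for one real flow */
        }
        return n;
    }

    int
    main(void)
    {
        int items[1000];
        int n = dump_next(items, 1000); /* any size is now safe */

        printf("got %d items\n", n);    /* prints "got 50" */
        return 0;
    }

One MIN() at the top of the loop replaces the assertion, turning what was
a caller-facing contract into a purely internal detail.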
I also folded in the following to minimize the size of the overall
change (when ignoring whitespace changes):

@@ -1473,7 +1473,7 @@ dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
 /* XXX the caller must use 'actions' without quiescing */
 static int
 dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
-                           struct dpif_flow *f, int max_flows)
+                           struct dpif_flow *flows, int max_flows)
 {
     struct dpif_netdev_flow_dump_thread *thread
         = dpif_netdev_flow_dump_thread_cast(thread_);
@@ -1509,7 +1508,7 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
         struct odputil_keybuf *maskbuf = &thread->maskbuf[i];
         struct odputil_keybuf *keybuf = &thread->keybuf[i];
         struct dp_netdev_flow *netdev_flow = netdev_flows[i];
-        struct dpif_flow *flow = &f[i];
+        struct dpif_flow *f = &flows[i];
         struct dp_netdev_actions *dp_actions;
         struct flow_wildcards wc;
         struct ofpbuf buf;
@@ -1520,24 +1519,24 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
         ofpbuf_use_stack(&buf, keybuf, sizeof *keybuf);
         odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks,
                                netdev_flow->flow.in_port.odp_port, true);
-        flow->key = ofpbuf_data(&buf);
-        flow->key_len = ofpbuf_size(&buf);
+        f->key = ofpbuf_data(&buf);
+        f->key_len = ofpbuf_size(&buf);
 
         /* Mask. */
         ofpbuf_use_stack(&buf, maskbuf, sizeof *maskbuf);
         odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
                                odp_to_u32(wc.masks.in_port.odp_port),
                                SIZE_MAX, true);
-        flow->mask = ofpbuf_data(&buf);
-        flow->mask_len = ofpbuf_size(&buf);
+        f->mask = ofpbuf_data(&buf);
+        f->mask_len = ofpbuf_size(&buf);
 
         /* Actions. */
         dp_actions = dp_netdev_flow_get_actions(netdev_flow);
-        flow->actions = dp_actions->actions;
-        flow->actions_len = dp_actions->size;
+        f->actions = dp_actions->actions;
+        f->actions_len = dp_actions->size;
 
         /* Stats. */
-        get_dpif_flow_stats(netdev_flow, &flow->stats);
+        get_dpif_flow_stats(netdev_flow, &f->stats);
     }
 
     return n_flows;

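For context on why batching helps in the first place: the original code
paid one lock/unlock pair on the dump mutex per flow, while the batched
version amortizes that pair over a whole batch.  A minimal sketch of the
amortization (hypothetical names and plain pthreads, not the OVS code):

    #include <pthread.h>

    #define BATCH 50                    /* internal batch size */

    static pthread_mutex_t dump_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* One lock/unlock pair is amortized over up to BATCH flows instead
     * of being paid per flow, so contention on 'dump_mutex' among
     * revalidator threads drops by roughly a factor of BATCH. */
    static int
    dump_batch(int *flows, int max_flows)
    {
        int n = 0;

        pthread_mutex_lock(&dump_mutex);
        while (n < max_flows && n < BATCH) {
            flows[n] = n;               /* stand-in for fetching one flow */
            n++;
        }
        pthread_mutex_unlock(&dump_mutex);
        return n;
    }

    int
    main(void)
    {
        int flows[BATCH];

        return dump_batch(flows, BATCH) == BATCH ? 0 : 1;
    }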
I'll apply the final patch in a minute.

Thanks,

Ben.