On Fri, Jul 18, 2014 at 11:33:38PM -0700, Ben Pfaff wrote:
> On Fri, Jul 18, 2014 at 09:05:49PM -0700, Jarno Rajahalme wrote:
> > Older GCC (e.g. 4.1.2) did not like the pointer casts used for
> > initializing the iteration cursors. This patch changes the code to
> > avoid the void casts for GCC, and makes the classifier interface more
> > similar to that of the cmap. These changes make the code work with
> > GCC 4.1.2 strict aliasing rules.
> >
> > VMware-BZ: #1287651
> >
> > Signed-off-by: Jarno Rajahalme <[email protected]>
>
> I see why GCC didn't like this code.
>
> There are lots of tricky constraints here. We don't want to violate ISO
> aliasing rules, we have to stick to exactly one declaration in the first
> clause of the for statement, and we don't want to have different code
> for GCC and other compilers if we don't have to. The proposed solution
> here compromises on the last one.
>
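To make the aliasing point concrete: the existing CMAP_CURSOR_START passes
&NODE cast to 'void **', and the helper then stores through that pointer, so
an object whose declared type is 'struct foo *' gets written through an
lvalue of type 'void *'.  A minimal sketch of the shape of the problem (the
names below are made up, this is not the exact OVS code):

struct foo;                          /* stand-in for any user node type */

static void
store_first(void **pnode)
{
    *pnode = NULL;                   /* store through an lvalue of type 'void *' */
}

void
example(void)
{
    struct foo *node;

    /* 'node' has declared type 'struct foo *' but is modified through an
     * lvalue of type 'void *'.  C99 6.5p7 does not allow that, which is
     * why GCC 4.1.2's strict-aliasing rules object. */
    store_first((void **) &node);
}
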
> I spent a bunch of time experimenting and came up with what I think is
> the outline of another solution. How about adding a 'node' member to
> cmap_cursor, like this:
>
> struct cmap_cursor {
> const struct cmap_impl *impl;
> uint32_t bucket_idx;
> int entry_idx;
> struct cmap_node *node;
> };
>
> struct cmap_cursor cmap_cursor_start(const struct cmap *);
> void cmap_cursor_advance(struct cmap_cursor *);
>
> #define CMAP_FOR_EACH(NODE, MEMBER, CMAP) \
> for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
> (cursor__.node \
> ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), true) \
> : false); \
> cmap_cursor_advance(&cursor__))
>
> #define CMAP_FOR_EACH_SAFE(NODE, MEMBER, CMAP) \
> for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
> (cursor__.node \
> ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), \
> cmap_cursor_advance(&cursor__), \
> true) \
> : false); \
> )
>
> with corresponding support functions:
>
> struct cmap_cursor
> cmap_cursor_start(const struct cmap *cmap)
> {
> struct cmap_cursor cursor;
>
> cursor.impl = cmap_get_impl(cmap);
> cursor.bucket_idx = 0;
> cursor.entry_idx = 0;
> cursor.node = NULL;
> cmap_cursor_advance(&cursor);
>
> return cursor;
> }
>
> void
> cmap_cursor_advance(struct cmap_cursor *cursor)
> {
> const struct cmap_impl *impl = cursor->impl;
>
> if (cursor->node) {
> cursor->node = cmap_node_next(cursor->node);
> if (cursor->node) {
> return;
> }
> }
>
> while (cursor->bucket_idx <= impl->mask) {
> const struct cmap_bucket *b = &impl->buckets[cursor->bucket_idx];
>
> while (cursor->entry_idx < CMAP_K) {
> cursor->node = cmap_node_next(&b->nodes[cursor->entry_idx++]);
> if (cursor->node) {
> return;
> }
> }
>
> cursor->bucket_idx++;
> cursor->entry_idx = 0;
> }
> }
Here's a fully worked out version of this idea that compiles and passes
all the tests (at least with GCC 4.7.2). With this, the _SAFE variants
don't take an extra named variable or really have any extra cost, which
makes me wonder whether we should just make the normal variant the same
as _SAFE.
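To show what the change looks like from a caller's point of view, here is
dp_netdev_flow_flush() (taken from the dpif-netdev.c hunk at the end of the
diff) before and after, pulled out of diff form:

/* Before: safe iteration needs a second variable to hold the next node. */
static void
dp_netdev_flow_flush(struct dp_netdev *dp)
{
    struct dp_netdev_flow *netdev_flow, *next;

    ovs_mutex_lock(&dp->flow_mutex);
    CMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
        dp_netdev_remove_flow(dp, netdev_flow);
    }
    ovs_mutex_unlock(&dp->flow_mutex);
}

/* After: only the iteration variable is declared.  The cursor__ hidden in
 * the macro has already advanced past the current node before the loop body
 * runs, so removing that node from the cmap remains safe. */
static void
dp_netdev_flow_flush(struct dp_netdev *dp)
{
    struct dp_netdev_flow *netdev_flow;

    ovs_mutex_lock(&dp->flow_mutex);
    CMAP_FOR_EACH_SAFE (netdev_flow, node, &dp->flow_table) {
        dp_netdev_remove_flow(dp, netdev_flow);
    }
    ovs_mutex_unlock(&dp->flow_mutex);
}

Anyway, here is the full patch: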
diff --git a/lib/classifier.c b/lib/classifier.c
index 332e05a..9c23cd6 100644
--- a/lib/classifier.c
+++ b/lib/classifier.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -476,8 +476,8 @@ classifier_destroy(struct classifier *cls)
OVS_EXCLUDED(cls->mutex)
{
if (cls) {
- struct cls_partition *partition, *next_partition;
- struct cls_subtable *subtable, *next_subtable;
+ struct cls_partition *partition;
+ struct cls_subtable *subtable;
int i;
ovs_mutex_lock(&cls->mutex);
@@ -485,14 +485,12 @@ classifier_destroy(struct classifier *cls)
trie_destroy(&cls->tries[i].root);
}
- CMAP_FOR_EACH_SAFE (subtable, next_subtable, cmap_node,
- &cls->subtables_map) {
+ CMAP_FOR_EACH_SAFE (subtable, cmap_node, &cls->subtables_map) {
destroy_subtable(cls, subtable);
}
cmap_destroy(&cls->subtables_map);
- CMAP_FOR_EACH_SAFE (partition, next_partition, cmap_node,
- &cls->partitions) {
+ CMAP_FOR_EACH_SAFE (partition, cmap_node, &cls->partitions) {
ovsrcu_postpone(free, partition);
}
cmap_destroy(&cls->partitions);
diff --git a/lib/cmap.c b/lib/cmap.c
index 5d6dbcf..ba744cc 100644
--- a/lib/cmap.c
+++ b/lib/cmap.c
@@ -784,31 +784,29 @@ cmap_rehash(struct cmap *cmap, uint32_t mask)
return new;
}
-/* Initializes 'cursor' for iterating through 'cmap'.
- *
- * Use via CMAP_FOR_EACH. */
-void
-cmap_cursor_init(struct cmap_cursor *cursor, const struct cmap *cmap)
+struct cmap_cursor
+cmap_cursor_start(const struct cmap *cmap)
{
- cursor->impl = cmap_get_impl(cmap);
- cursor->bucket_idx = 0;
- cursor->entry_idx = 0;
+ struct cmap_cursor cursor;
+
+ cursor.impl = cmap_get_impl(cmap);
+ cursor.bucket_idx = 0;
+ cursor.entry_idx = 0;
+ cursor.node = NULL;
+ cmap_cursor_advance(&cursor);
+
+ return cursor;
}
-/* Returns the next node for 'cursor' to visit, following 'node', or NULL if
- * the last node has been visited.
- *
- * Use via CMAP_FOR_EACH. */
-struct cmap_node *
-cmap_cursor_next(struct cmap_cursor *cursor, const struct cmap_node *node)
+void
+cmap_cursor_advance(struct cmap_cursor *cursor)
{
const struct cmap_impl *impl = cursor->impl;
- if (node) {
- struct cmap_node *next = cmap_node_next(node);
-
- if (next) {
- return next;
+ if (cursor->node) {
+ cursor->node = cmap_node_next(cursor->node);
+ if (cursor->node) {
+ return;
}
}
@@ -816,18 +814,15 @@ cmap_cursor_next(struct cmap_cursor *cursor, const struct cmap_node *node)
const struct cmap_bucket *b = &impl->buckets[cursor->bucket_idx];
while (cursor->entry_idx < CMAP_K) {
- struct cmap_node *node =
- cmap_node_next(&b->nodes[cursor->entry_idx++]);
-
- if (node) {
- return node;
+ cursor->node = cmap_node_next(&b->nodes[cursor->entry_idx++]);
+ if (cursor->node) {
+ return;
}
}
cursor->bucket_idx++;
cursor->entry_idx = 0;
}
-
- return NULL;
}
/* Returns the next node in 'cmap' in hash order, or NULL if no nodes remain in
diff --git a/lib/cmap.h b/lib/cmap.h
index b7569f5..87a1d53 100644
--- a/lib/cmap.h
+++ b/lib/cmap.h
@@ -168,70 +168,54 @@ struct cmap_node *cmap_find_protected(const struct cmap *, uint32_t hash);
* has already expired.
*/
-#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP) \
- for ((cmap_cursor_init(CURSOR, CMAP), \
- ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, NULL), MEMBER)); \
- NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
- ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
- MEMBER))
-
-#define CMAP_CURSOR_FOR_EACH_SAFE(NODE, NEXT, MEMBER, CURSOR, CMAP) \
- for ((cmap_cursor_init(CURSOR, CMAP), \
- ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, NULL), MEMBER)); \
- (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER) \
- ? ASSIGN_CONTAINER(NEXT, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
- MEMBER), true \
- : false); \
- (NODE) = (NEXT))
-
-#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
- for (ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
- MEMBER); \
- NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
- ASSIGN_CONTAINER(NODE, cmap_cursor_next(CURSOR, &(NODE)->MEMBER), \
- MEMBER))
+#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP) \
+ for (*(CURSOR) = cmap_cursor_start(CMAP); \
+ ((CURSOR)->node \
+ ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true) \
+ : false); \
+ cmap_cursor_advance(CURSOR))
+
+#define CMAP_CURSOR_FOR_EACH_SAFE(NODE, MEMBER, CURSOR, CMAP) \
+ for (*(CURSOR) = cmap_cursor_start(CMAP); \
+ ((CURSOR)->node \
+ ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), \
+ cmap_cursor_advance(CURSOR), \
+ true) \
+ : false); \
+ )
+
+#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
+ for (cmap_cursor_advance(CURSOR); \
+ ((CURSOR)->node \
+ ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), true) \
+ : false); \
+ cmap_cursor_advance(CURSOR))
struct cmap_cursor {
const struct cmap_impl *impl;
uint32_t bucket_idx;
int entry_idx;
+ struct cmap_node *node;
};
-void cmap_cursor_init(struct cmap_cursor *, const struct cmap *);
-struct cmap_node *cmap_cursor_next(struct cmap_cursor *,
- const struct cmap_node *);
-
-
-static inline struct cmap_cursor cmap_cursor_start(const struct cmap *cmap,
- void **pnode,
- const void *offset)
-{
- struct cmap_cursor cursor;
-
- cmap_cursor_init(&cursor, cmap);
- *pnode = (char *)cmap_cursor_next(&cursor, NULL) + (ptrdiff_t)offset;
-
- return cursor;
-}
-
-#define CMAP_CURSOR_START(NODE, MEMBER, CMAP) \
- cmap_cursor_start(CMAP, (void **)&(NODE), \
- OBJECT_CONTAINING(NULL, NODE, MEMBER))
+struct cmap_cursor cmap_cursor_start(const struct cmap *);
+void cmap_cursor_advance(struct cmap_cursor *);
#define CMAP_FOR_EACH(NODE, MEMBER, CMAP) \
- for (struct cmap_cursor cursor__ = CMAP_CURSOR_START(NODE, MEMBER, CMAP); \
- NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
- ASSIGN_CONTAINER(NODE, cmap_cursor_next(&cursor__, &(NODE)->MEMBER), \
- MEMBER))
-
-#define CMAP_FOR_EACH_SAFE(NODE, NEXT, MEMBER, CMAP) \
- for (struct cmap_cursor cursor__ = CMAP_CURSOR_START(NODE, MEMBER, CMAP); \
- (NODE != OBJECT_CONTAINING(NULL, NODE, MEMBER) \
- ? ASSIGN_CONTAINER(NEXT, \
- cmap_cursor_next(&cursor__, &(NODE)->MEMBER), \
- MEMBER), true \
+ for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
+ (cursor__.node \
+ ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), true) \
+ : false); \
+ cmap_cursor_advance(&cursor__))
+
+#define CMAP_FOR_EACH_SAFE(NODE, MEMBER, CMAP) \
+ for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
+ (cursor__.node \
+ ? (ASSIGN_CONTAINER(NODE, cursor__.node, MEMBER), \
+ cmap_cursor_advance(&cursor__), \
+ true) \
: false); \
- (NODE) = (NEXT))
+ )
static inline struct cmap_node *cmap_first(const struct cmap *);
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 90445d1..9fbe4da 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -963,10 +963,10 @@ dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
static void
dp_netdev_flow_flush(struct dp_netdev *dp)
{
- struct dp_netdev_flow *netdev_flow, *next;
+ struct dp_netdev_flow *netdev_flow;
ovs_mutex_lock(&dp->flow_mutex);
- CMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
+ CMAP_FOR_EACH_SAFE (netdev_flow, node, &dp->flow_table) {
dp_netdev_remove_flow(dp, netdev_flow);
}
ovs_mutex_unlock(&dp->flow_mutex);
Thanks,
Ben.