I suspect that this approach to an rwlock is faster when many threads try to
take the read lock at once, because it avoids cross-core contention on cache
lines: each reader takes a mutex that lives in its own thread's cache line,
and only a writer has to touch them all.  I'm sending it out for initial
performance testing.  The code still needs some polish, but there's no point
in polishing it until we know that it actually improves performance.
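
For reference, here is how I intend it to be used (a minimal sketch against
the fat-rwlock API added below; the data being protected and the thread
setup are omitted):

    struct fat_rwlock lock;

    fat_rwlock_init(&lock);

    /* Readers, on any thread: each one locks only a mutex in its own
     * thread's cache line, and the read lock may be taken recursively. */
    fat_rwlock_rdlock(&lock);
    /* ...read the protected data... */
    fat_rwlock_unlock(&lock);

    /* Writer: locks every thread's mutex, so this is the expensive side. */
    fat_rwlock_wrlock(&lock);
    /* ...modify the protected data... */
    fat_rwlock_unlock(&lock);

    fat_rwlock_destroy(&lock);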

Signed-off-by: Ben Pfaff <[email protected]>
---
 lib/automake.mk         |    2 +
 lib/classifier.c        |    4 +-
 lib/classifier.h        |    3 +-
 lib/dpif-netdev.c       |   16 ++--
 lib/fat-rwlock.c        |  229 +++++++++++++++++++++++++++++++++++++++++++++++
 lib/fat-rwlock.h        |   35 ++++++++
 ofproto/ofproto-dpif.c  |    8 +-
 ofproto/ofproto.c       |   72 +++++++--------
 tests/test-classifier.c |   28 +++---
 utilities/ovs-ofctl.c   |   16 ++--
 10 files changed, 340 insertions(+), 73 deletions(-)
 create mode 100644 lib/fat-rwlock.c
 create mode 100644 lib/fat-rwlock.h

diff --git a/lib/automake.mk b/lib/automake.mk
index ac6ff4a..81ee413 100644
--- a/lib/automake.mk
+++ b/lib/automake.mk
@@ -57,6 +57,8 @@ lib_libopenvswitch_la_SOURCES = \
        lib/dynamic-string.h \
        lib/entropy.c \
        lib/entropy.h \
+       lib/fat-rwlock.c \
+       lib/fat-rwlock.h \
        lib/fatal-signal.c \
        lib/fatal-signal.h \
        lib/flow.c \
diff --git a/lib/classifier.c b/lib/classifier.c
index 1675283..30a91b7 100644
--- a/lib/classifier.c
+++ b/lib/classifier.c
@@ -176,7 +176,7 @@ classifier_init(struct classifier *cls, const uint8_t *flow_segments)
     hmap_init(&cls->subtables);
     list_init(&cls->subtables_priority);
     hmap_init(&cls->partitions);
-    ovs_rwlock_init(&cls->rwlock);
+    fat_rwlock_init(&cls->rwlock);
     cls->n_flow_segments = 0;
     if (flow_segments) {
         while (cls->n_flow_segments < CLS_MAX_INDICES
@@ -213,7 +213,7 @@ classifier_destroy(struct classifier *cls)
             free(partition);
         }
         hmap_destroy(&cls->partitions);
-        ovs_rwlock_destroy(&cls->rwlock);
+        fat_rwlock_destroy(&cls->rwlock);
     }
 }
 
diff --git a/lib/classifier.h b/lib/classifier.h
index b6b89a0..c3c1c3b 100644
--- a/lib/classifier.h
+++ b/lib/classifier.h
@@ -213,6 +213,7 @@
  * The classifier may safely be accessed by many reader threads concurrently or
  * by a single writer. */
 
+#include "fat-rwlock.h"
 #include "flow.h"
 #include "hindex.h"
 #include "hmap.h"
@@ -254,7 +255,7 @@ struct classifier {
     struct list subtables_priority; /* Subtables in descending priority order.
                                      */
     struct hmap partitions;     /* Contains "struct cls_partition"s. */
-    struct ovs_rwlock rwlock OVS_ACQ_AFTER(ofproto_mutex);
+    struct fat_rwlock rwlock OVS_ACQ_AFTER(ofproto_mutex);
     struct cls_trie tries[CLS_MAX_TRIES]; /* Prefix tries. */
     unsigned int n_tries;
 };
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 6ef7495..47cddba 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -637,9 +637,9 @@ dpif_netdev_get_max_ports(const struct dpif *dpif OVS_UNUSED)
 static void
 dp_netdev_free_flow(struct dp_netdev *dp, struct dp_netdev_flow *netdev_flow)
 {
-    ovs_rwlock_wrlock(&dp->cls.rwlock);
+    fat_rwlock_wrlock(&dp->cls.rwlock);
     classifier_remove(&dp->cls, &netdev_flow->cr);
-    ovs_rwlock_unlock(&dp->cls.rwlock);
+    fat_rwlock_unlock(&dp->cls.rwlock);
     cls_rule_destroy(&netdev_flow->cr);
 
     hmap_remove(&dp->flow_table, &netdev_flow->node);
@@ -754,9 +754,9 @@ dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct flow *flow)
 {
     struct cls_rule *cr;
 
-    ovs_rwlock_wrlock(&dp->cls.rwlock);
+    fat_rwlock_wrlock(&dp->cls.rwlock);
     cr = classifier_lookup(&dp->cls, flow, NULL);
-    ovs_rwlock_unlock(&dp->cls.rwlock);
+    fat_rwlock_unlock(&dp->cls.rwlock);
 
     return (cr
             ? CONTAINER_OF(cr, struct dp_netdev_flow, cr)
@@ -927,15 +927,15 @@ dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *flow,
 
     match_init(&match, flow, wc);
     cls_rule_init(&netdev_flow->cr, &match, NETDEV_RULE_PRIORITY);
-    ovs_rwlock_wrlock(&dp->cls.rwlock);
+    fat_rwlock_wrlock(&dp->cls.rwlock);
     classifier_insert(&dp->cls, &netdev_flow->cr);
-    ovs_rwlock_unlock(&dp->cls.rwlock);
+    fat_rwlock_unlock(&dp->cls.rwlock);
 
     error = set_flow_actions(netdev_flow, actions, actions_len);
     if (error) {
-        ovs_rwlock_wrlock(&dp->cls.rwlock);
+        fat_rwlock_wrlock(&dp->cls.rwlock);
         classifier_remove(&dp->cls, &netdev_flow->cr);
-        ovs_rwlock_unlock(&dp->cls.rwlock);
+        fat_rwlock_unlock(&dp->cls.rwlock);
         cls_rule_destroy(&netdev_flow->cr);
 
         free(netdev_flow);
diff --git a/lib/fat-rwlock.c b/lib/fat-rwlock.c
new file mode 100644
index 0000000..df6c0ca
--- /dev/null
+++ b/lib/fat-rwlock.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2013, 2014 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "fat-rwlock.h"
+
+#include "hmap.h"
+#include "list.h"
+#include "ovs-thread.h"
+#include "random.h"
+
+#define CACHE_LINE_SIZE 64
+
+static pthread_key_t perthread_table_key;
+static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
+
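+/* Per-thread state for one fat_rwlock.
+ *
+ * A reader takes the lock by locking only its own thread's mutex; a writer
+ * takes it by locking every thread's mutex.  Each perthread structure is
+ * cache-line aligned so that uncontended readers on different cores never
+ * touch the same cache line. */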
+struct fat_rwlock_perthread {
+    void *base;
+    struct fat_rwlock *rwlock;
+    struct list list_node;
+
+    struct fat_rwlock_perthread_table *table;
+    struct hmap_node hmap_node;
+
+    struct ovs_mutex mutex OVS_ACQ_AFTER(mutex);
+    unsigned int depth;
+};
+
+struct fat_rwlock_perthread_table {
+    /* XXX we could declare this as __thread and use a pthread_key_t just for
+     * garbage collection */
+    struct hmap perthreads;
+};
+
+static void
+fat_rwlock_lockall__(struct fat_rwlock *rwlock)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+    struct fat_rwlock_perthread *perthread;
+
+    LIST_FOR_EACH (perthread, list_node, &rwlock->threads) {
+        ovs_mutex_lock(&perthread->mutex);
+    }
+}
+
+static void
+fat_rwlock_unlockall__(struct fat_rwlock *rwlock)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+    struct fat_rwlock_perthread *perthread;
+
+    LIST_FOR_EACH (perthread, list_node, &rwlock->threads) {
+        ovs_mutex_unlock(&perthread->mutex);
+    }
+}
+
+static void
+free_perthread_table__(void *table_)
+{
+    struct fat_rwlock_perthread_table *table = table_;
+    struct fat_rwlock_perthread *perthread, *next;
+
+    HMAP_FOR_EACH_SAFE (perthread, next, hmap_node, &table->perthreads) {
+        struct fat_rwlock *rwlock = perthread->rwlock;
+
+        if (perthread->depth) {
+            abort();
+        }
+
+        ovs_mutex_lock(&mutex);
+        fat_rwlock_lockall__(rwlock);
+        list_remove(&perthread->list_node);
+        fat_rwlock_unlockall__(rwlock);
+        ovs_mutex_unlock(&mutex);
+
+        /* fat_rwlock_unlockall__() did not see 'perthread' (it was just
+         * removed from 'rwlock->threads'), so release its mutex here, then
+         * free it: otherwise the mutex stays locked and 'base' leaks. */
+        ovs_mutex_unlock(&perthread->mutex);
+
+        hmap_remove(&table->perthreads, &perthread->hmap_node);
+        ovs_mutex_destroy(&perthread->mutex);
+        free(perthread->base);
+    }
+    hmap_destroy(&table->perthreads);
+    free(table);
+}
+
+void
+fat_rwlock_init(struct fat_rwlock *rwlock)
+{
+    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+
+    if (ovsthread_once_start(&once)) {
+        xpthread_key_create(&perthread_table_key, free_perthread_table__);
+        ovsthread_once_done(&once);
+    }
+
+    rwlock->id = random_uint32(); /* XXX check for uniqueness */
+    list_init(&rwlock->threads);
+}
+
+void
+fat_rwlock_destroy(struct fat_rwlock *rwlock)
+{
+    struct fat_rwlock_perthread *perthread, *next;
+
+    ovs_mutex_lock(&mutex);
+    LIST_FOR_EACH_SAFE (perthread, next, list_node, &rwlock->threads) {
+        list_remove(&perthread->list_node);
+        hmap_remove(&perthread->table->perthreads, &perthread->hmap_node);
+        ovs_mutex_destroy(&perthread->mutex);
+        free(perthread->base);
+    }
+    ovs_mutex_unlock(&mutex);
+}
+
+static struct fat_rwlock_perthread *
+fat_rwlock_get_perthread__(struct fat_rwlock *rwlock)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+    struct fat_rwlock_perthread_table *table;
+    struct fat_rwlock_perthread *perthread;
+    void *base;
+
+    table = pthread_getspecific(perthread_table_key);
+    if (!table) {
+        table = xmalloc(sizeof *table);
+        hmap_init(&table->perthreads);
+        xpthread_setspecific(perthread_table_key, table);
+    }
+
+    /* A thread may hold perthread state for several fat_rwlocks, so check
+     * that the node we find belongs to this one (the hash is the lock's id,
+     * which is random and not guaranteed unique). */
+    HMAP_FOR_EACH_WITH_HASH (perthread, hmap_node, rwlock->id,
+                             &table->perthreads) {
+        if (perthread->rwlock == rwlock) {
+            return perthread;
+        }
+    }
+
+    ovs_mutex_lock(&mutex);
+    fat_rwlock_lockall__(rwlock);
+
+    /* Allocate with CACHE_LINE_SIZE - 1 bytes of slack so that 'perthread'
+     * can be rounded up to a cache line boundary: each thread's mutex then
+     * lives in its own cache line. */
+    base = xmalloc(sizeof *perthread + CACHE_LINE_SIZE - 1);
+    perthread = (void *) ROUND_UP((uintptr_t) base, CACHE_LINE_SIZE);
+    perthread->base = base;
+    perthread->rwlock = rwlock;
+    list_push_back(&rwlock->threads, &perthread->list_node);
+    perthread->table = table;
+    hmap_insert(&table->perthreads, &perthread->hmap_node, rwlock->id);
+    ovs_mutex_init(&perthread->mutex);
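+    /* 'perthread' was added to 'rwlock->threads' after
+     * fat_rwlock_lockall__() ran, so take its mutex here to balance the
+     * unlock that fat_rwlock_unlockall__() below will perform on it. */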
+    ovs_mutex_lock(&perthread->mutex);
+    perthread->depth = 0;
+
+    fat_rwlock_unlockall__(rwlock);
+    ovs_mutex_unlock(&mutex);
+
+    return perthread;
+}
+
+void
+fat_rwlock_rdlock(const struct fat_rwlock *rwlock_)
+    OVS_ACQ_RDLOCK(rwlock_)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
+    struct fat_rwlock_perthread *perthread;
+
+    perthread = fat_rwlock_get_perthread__(rwlock);
+
+    switch (perthread->depth) {
+    case 0:
+        ovs_mutex_lock(&perthread->mutex);
+        /* fall through */
+    default:
+        perthread->depth++;
+        break;
+
+    case UINT_MAX:
+        abort();
+    }
+}
+
+void
+fat_rwlock_wrlock(const struct fat_rwlock *rwlock_)
+    OVS_ACQ_WRLOCK(rwlock_)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
+    struct fat_rwlock_perthread *perthread;
+
+    perthread = fat_rwlock_get_perthread__(rwlock);
+    ovs_assert(!perthread->depth);
+    perthread->depth = UINT_MAX;
+
+    fat_rwlock_lockall__(rwlock);
+}
+
+void
+fat_rwlock_unlock(const struct fat_rwlock *rwlock_)
+    OVS_RELEASES(rwlock_)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+    struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_);
+    struct fat_rwlock_perthread *perthread;
+
+    perthread = fat_rwlock_get_perthread__(rwlock);
+    switch (perthread->depth) {
+    case UINT_MAX:
+        fat_rwlock_unlockall__(rwlock);
+        perthread->depth = 0;
+        break;
+
+    case 0:
+        abort();
+
+    case 1:
+        ovs_mutex_unlock(&perthread->mutex);
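+        /* fall through */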
+    default:
+        perthread->depth--;
+        break;
+    }
+}
diff --git a/lib/fat-rwlock.h b/lib/fat-rwlock.h
new file mode 100644
index 0000000..0b751c2
--- /dev/null
+++ b/lib/fat-rwlock.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2013 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FAT_RWLOCK_H
+#define FAT_RWLOCK_H 1
+
+#include "compiler.h"
+#include "list.h"
+
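+/* "Fat" rwlock.
+ *
+ * This works like a reader-writer lock, but it is built from one mutex per
+ * thread per lock, trading memory for speed: taking the read lock touches
+ * only the calling thread's own mutex, while taking the write lock means
+ * locking every thread's mutex.  The read lock may be taken recursively;
+ * fat_rwlock_unlock() releases either kind of hold. */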
+struct OVS_LOCKABLE fat_rwlock {
+    uint32_t id;
+    struct list threads;
+};
+
+void fat_rwlock_init(struct fat_rwlock *);
+void fat_rwlock_destroy(struct fat_rwlock *);
+
+void fat_rwlock_rdlock(const struct fat_rwlock *rwlock) OVS_ACQ_RDLOCK(rwlock);
+void fat_rwlock_wrlock(const struct fat_rwlock *rwlock) OVS_ACQ_WRLOCK(rwlock);
+void fat_rwlock_unlock(const struct fat_rwlock *rwlock) OVS_RELEASES(rwlock);
+
+#endif /* fat-rwlock.h */
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 91ffe23..fa69446 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -1130,9 +1130,9 @@ destruct(struct ofproto *ofproto_)
     OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
         struct cls_cursor cursor;
 
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, NULL);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
         CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
             ofproto_rule_delete(&ofproto->up, &rule->up);
         }
@@ -3013,7 +3013,7 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto,
     }
 
     cls = &ofproto->up.tables[table_id].cls;
-    ovs_rwlock_rdlock(&cls->rwlock);
+    fat_rwlock_rdlock(&cls->rwlock);
     frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0;
     if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
         /* We must pretend that transport ports are unavailable. */
@@ -3030,7 +3030,7 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto,
 
     *rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
     rule_dpif_ref(*rule);
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
 
     return *rule != NULL;
 }
diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
index 676a6cb..5045209 100644
--- a/ofproto/ofproto.c
+++ b/ofproto/ofproto.c
@@ -1168,7 +1168,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     }
 
     table->max_flows = s->max_flows;
-    ovs_rwlock_wrlock(&table->cls.rwlock);
+    fat_rwlock_wrlock(&table->cls.rwlock);
     if (classifier_count(&table->cls) > table->max_flows
         && table->eviction_fields) {
         /* 'table' contains more flows than allowed.  We might not be able to
@@ -1188,7 +1188,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     classifier_set_prefix_fields(&table->cls,
                                  s->prefix_fields, s->n_prefix_fields);
 
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
 }
 
 bool
@@ -1263,9 +1263,9 @@ ofproto_flush__(struct ofproto *ofproto)
             continue;
         }
 
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, NULL);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
         CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
             if (!rule->pending) {
                 ofproto_rule_delete__(ofproto, rule, OFPRR_DELETE);
@@ -1454,7 +1454,7 @@ ofproto_run(struct ofproto *p)
                 heap_rebuild(&evg->rules);
             }
 
-            ovs_rwlock_rdlock(&table->cls.rwlock);
+            fat_rwlock_rdlock(&table->cls.rwlock);
             cls_cursor_init(&cursor, &table->cls, NULL);
             CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
                 if (!rule->eviction_group
@@ -1462,7 +1462,7 @@ ofproto_run(struct ofproto *p)
                     eviction_group_add_rule(rule);
                 }
             }
-            ovs_rwlock_unlock(&table->cls.rwlock);
+            fat_rwlock_unlock(&table->cls.rwlock);
             ovs_mutex_unlock(&ofproto_mutex);
         }
     }
@@ -1612,9 +1612,9 @@ ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage)
 
     n_rules = 0;
     OFPROTO_FOR_EACH_TABLE (table, ofproto) {
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         n_rules += classifier_count(&table->cls);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
     }
     simap_increase(usage, "rules", n_rules);
 
@@ -1901,7 +1901,7 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
 
     /* First do a cheap check whether the rule we're looking for already exists
      * with the actions that we want.  If it does, then we're done. */
-    ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
+    fat_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(
                                   &ofproto->tables[0].cls, match, priority));
     if (rule) {
@@ -1913,7 +1913,7 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
     } else {
         must_add = true;
     }
-    ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
+    fat_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
 
     /* If there's no such rule or the rule doesn't have the actions we want,
      * fall back to a executing a full flow mod.  We can't optimize this at
@@ -1952,10 +1952,10 @@ ofproto_delete_flow(struct ofproto *ofproto,
 
     /* First do a cheap check whether the rule we're looking for has already
      * been deleted.  If so, then we're done. */
-    ovs_rwlock_rdlock(&cls->rwlock);
+    fat_rwlock_rdlock(&cls->rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(cls, target,
                                                             priority));
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
     if (!rule) {
         return true;
     }
@@ -3078,9 +3078,9 @@ handle_table_stats_request(struct ofconn *ofconn,
         ots[i].instructions = htonl(OFPIT11_ALL);
         ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
         ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
-        ovs_rwlock_rdlock(&p->tables[i].cls.rwlock);
+        fat_rwlock_rdlock(&p->tables[i].cls.rwlock);
         ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
-        ovs_rwlock_unlock(&p->tables[i].cls.rwlock);
+        fat_rwlock_unlock(&p->tables[i].cls.rwlock);
     }
 
     p->ofproto_class->get_tables(p, ots);
@@ -3442,7 +3442,7 @@ collect_rules_loose(struct ofproto *ofproto,
             struct cls_cursor cursor;
             struct rule *rule;
 
-            ovs_rwlock_rdlock(&table->cls.rwlock);
+            fat_rwlock_rdlock(&table->cls.rwlock);
             cls_cursor_init(&cursor, &table->cls, &criteria->cr);
             CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
                 error = collect_rule(rule, criteria, rules);
@@ -3450,7 +3450,7 @@ collect_rules_loose(struct ofproto *ofproto,
                     break;
                 }
             }
-            ovs_rwlock_unlock(&table->cls.rwlock);
+            fat_rwlock_unlock(&table->cls.rwlock);
         }
     }
 
@@ -3502,10 +3502,10 @@ collect_rules_strict(struct ofproto *ofproto,
         FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
             struct rule *rule;
 
-            ovs_rwlock_rdlock(&table->cls.rwlock);
+            fat_rwlock_rdlock(&table->cls.rwlock);
             rule = rule_from_cls_rule(classifier_find_rule_exactly(
                                           &table->cls, &criteria->cr));
-            ovs_rwlock_unlock(&table->cls.rwlock);
+            fat_rwlock_unlock(&table->cls.rwlock);
             if (rule) {
                 error = collect_rule(rule, criteria, rules);
                 if (error) {
@@ -3653,12 +3653,12 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results)
         struct cls_cursor cursor;
         struct rule *rule;
 
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, NULL);
         CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
             flow_stats_ds(rule, results);
         }
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
     }
 }
 
@@ -3969,9 +3969,9 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     cls_rule_init(&cr, &fm->match, fm->priority);
 
     /* Transform "add" into "modify" if there's an existing identical flow. */
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    fat_rwlock_rdlock(&table->cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &cr));
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
     if (rule) {
         cls_rule_destroy(&cr);
         if (!rule_is_modifiable(rule)) {
@@ -4001,9 +4001,9 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     if (fm->flags & OFPUTIL_FF_CHECK_OVERLAP) {
         bool overlaps;
 
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         overlaps = classifier_rule_overlaps(&table->cls, &cr);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
 
         if (overlaps) {
             cls_rule_destroy(&cr);
@@ -4824,13 +4824,13 @@ ofproto_collect_ofmonitor_refresh_rules(const struct ofmonitor *m,
         struct cls_cursor cursor;
         struct rule *rule;
 
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, &target);
         CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
             ovs_assert(!rule->pending); /* XXX */
             ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
         }
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
     }
 
     HMAP_FOR_EACH (op, hmap_node, &ofproto->deletions) {
@@ -6659,9 +6659,9 @@ oftable_init(struct oftable *table)
 static void
 oftable_destroy(struct oftable *table)
 {
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    fat_rwlock_rdlock(&table->cls.rwlock);
     ovs_assert(classifier_is_empty(&table->cls));
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
     oftable_disable_eviction(table);
     classifier_destroy(&table->cls);
     free(table->name);
@@ -6743,12 +6743,12 @@ oftable_enable_eviction(struct oftable *table,
     hmap_init(&table->eviction_groups_by_id);
     heap_init(&table->eviction_groups_by_size);
 
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    fat_rwlock_rdlock(&table->cls.rwlock);
     cls_cursor_init(&cursor, &table->cls, NULL);
     CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
         eviction_group_add_rule(rule);
     }
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
 }
 
 /* Removes 'rule' from the oftable that contains it. */
@@ -6758,9 +6758,9 @@ oftable_remove_rule__(struct ofproto *ofproto, struct rule *rule)
 {
     struct classifier *cls = &ofproto->tables[rule->table_id].cls;
 
-    ovs_rwlock_wrlock(&cls->rwlock);
+    fat_rwlock_wrlock(&cls->rwlock);
     classifier_remove(cls, CONST_CAST(struct cls_rule *, &rule->cr));
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
 
     cookies_remove(ofproto, rule);
 
@@ -6807,9 +6807,9 @@ oftable_insert_rule(struct rule *rule)
         struct meter *meter = ofproto->meters[meter_id];
         list_insert(&meter->rules, &rule->meter_list_node);
     }
-    ovs_rwlock_wrlock(&table->cls.rwlock);
+    fat_rwlock_wrlock(&table->cls.rwlock);
     classifier_insert(&table->cls, CONST_CAST(struct cls_rule *, &rule->cr));
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
     eviction_group_add_rule(rule);
 }
 
@@ -6878,7 +6878,7 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
     OFPROTO_FOR_EACH_TABLE (oftable, ofproto) {
         const struct cls_subtable *table;
 
-        ovs_rwlock_rdlock(&oftable->cls.rwlock);
+        fat_rwlock_rdlock(&oftable->cls.rwlock);
         HMAP_FOR_EACH (table, hmap_node, &oftable->cls.subtables) {
             if (minimask_get_vid_mask(&table->mask) == VLAN_VID_MASK) {
                 const struct cls_rule *rule;
@@ -6890,7 +6890,7 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
                 }
             }
         }
-        ovs_rwlock_unlock(&oftable->cls.rwlock);
+        fat_rwlock_unlock(&oftable->cls.rwlock);
     }
 }
 
diff --git a/tests/test-classifier.c b/tests/test-classifier.c
index 93a2dc1..4282fd4 100644
--- a/tests/test-classifier.c
+++ b/tests/test-classifier.c
@@ -449,13 +449,13 @@ destroy_classifier(struct classifier *cls)
     struct test_rule *rule, *next_rule;
     struct cls_cursor cursor;
 
-    ovs_rwlock_wrlock(&cls->rwlock);
+    fat_rwlock_wrlock(&cls->rwlock);
     cls_cursor_init(&cursor, cls, NULL);
     CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cls_rule, &cursor) {
         classifier_remove(cls, &rule->cls_rule);
         free_rule(rule);
     }
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
     classifier_destroy(cls);
 }
 
@@ -621,13 +621,13 @@ test_empty(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
     struct tcls tcls;
 
     classifier_init(&cls, flow_segment_u32s);
-    ovs_rwlock_wrlock(&cls.rwlock);
+    fat_rwlock_wrlock(&cls.rwlock);
     classifier_set_prefix_fields(&cls, trie_fields, ARRAY_SIZE(trie_fields));
     tcls_init(&tcls);
     assert(classifier_is_empty(&cls));
     assert(tcls_is_empty(&tcls));
     compare_classifiers(&cls, &tcls);
-    ovs_rwlock_unlock(&cls.rwlock);
+    fat_rwlock_unlock(&cls.rwlock);
     classifier_destroy(&cls);
     tcls_destroy(&tcls);
 }
@@ -654,7 +654,7 @@ test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
                          hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);
 
         classifier_init(&cls, flow_segment_u32s);
-        ovs_rwlock_wrlock(&cls.rwlock);
+        fat_rwlock_wrlock(&cls.rwlock);
         classifier_set_prefix_fields(&cls, trie_fields,
                                      ARRAY_SIZE(trie_fields));
         tcls_init(&tcls);
@@ -671,7 +671,7 @@ test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
         compare_classifiers(&cls, &tcls);
 
         free_rule(rule);
-        ovs_rwlock_unlock(&cls.rwlock);
+        fat_rwlock_unlock(&cls.rwlock);
         classifier_destroy(&cls);
         tcls_destroy(&tcls);
     }
@@ -695,7 +695,7 @@ test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
         rule2->aux += 5;
 
         classifier_init(&cls, flow_segment_u32s);
-        ovs_rwlock_wrlock(&cls.rwlock);
+        fat_rwlock_wrlock(&cls.rwlock);
         classifier_set_prefix_fields(&cls, trie_fields,
                                      ARRAY_SIZE(trie_fields));
         tcls_init(&tcls);
@@ -713,7 +713,7 @@ test_rule_replacement(int argc OVS_UNUSED, char *argv[] 
OVS_UNUSED)
         check_tables(&cls, 1, 1, 0);
         compare_classifiers(&cls, &tcls);
         tcls_destroy(&tcls);
-        ovs_rwlock_unlock(&cls.rwlock);
+        fat_rwlock_unlock(&cls.rwlock);
         destroy_classifier(&cls);
     }
 }
@@ -809,7 +809,7 @@ test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
             }
 
             classifier_init(&cls, flow_segment_u32s);
-            ovs_rwlock_wrlock(&cls.rwlock);
+            fat_rwlock_wrlock(&cls.rwlock);
             classifier_set_prefix_fields(&cls, trie_fields,
                                          ARRAY_SIZE(trie_fields));
             tcls_init(&tcls);
@@ -850,7 +850,7 @@ test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
                 compare_classifiers(&cls, &tcls);
             }
 
-            ovs_rwlock_unlock(&cls.rwlock);
+            fat_rwlock_unlock(&cls.rwlock);
             classifier_destroy(&cls);
             tcls_destroy(&tcls);
 
@@ -913,7 +913,7 @@ test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
         } while ((1 << count_ones(value_mask)) < N_RULES);
 
         classifier_init(&cls, flow_segment_u32s);
-        ovs_rwlock_wrlock(&cls.rwlock);
+        fat_rwlock_wrlock(&cls.rwlock);
         classifier_set_prefix_fields(&cls, trie_fields,
                                      ARRAY_SIZE(trie_fields));
         tcls_init(&tcls);
@@ -942,7 +942,7 @@ test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
             compare_classifiers(&cls, &tcls);
         }
 
-        ovs_rwlock_unlock(&cls.rwlock);
+        fat_rwlock_unlock(&cls.rwlock);
         classifier_destroy(&cls);
         tcls_destroy(&tcls);
     }
@@ -977,7 +977,7 @@ test_many_rules_in_n_tables(int n_tables)
         shuffle(priorities, ARRAY_SIZE(priorities));
 
         classifier_init(&cls, flow_segment_u32s);
-        ovs_rwlock_wrlock(&cls.rwlock);
+        fat_rwlock_wrlock(&cls.rwlock);
         classifier_set_prefix_fields(&cls, trie_fields,
                                      ARRAY_SIZE(trie_fields));
         tcls_init(&tcls);
@@ -1012,7 +1012,7 @@ test_many_rules_in_n_tables(int n_tables)
             free_rule(target);
         }
 
-        ovs_rwlock_unlock(&cls.rwlock);
+        fat_rwlock_unlock(&cls.rwlock);
         destroy_classifier(&cls);
         tcls_destroy(&tcls);
     }
diff --git a/utilities/ovs-ofctl.c b/utilities/ovs-ofctl.c
index 9b02b25..e8453f3 100644
--- a/utilities/ovs-ofctl.c
+++ b/utilities/ovs-ofctl.c
@@ -2252,13 +2252,13 @@ fte_free_all(struct classifier *cls)
     struct cls_cursor cursor;
     struct fte *fte, *next;
 
-    ovs_rwlock_wrlock(&cls->rwlock);
+    fat_rwlock_wrlock(&cls->rwlock);
     cls_cursor_init(&cursor, cls, NULL);
     CLS_CURSOR_FOR_EACH_SAFE (fte, next, rule, &cursor) {
         classifier_remove(cls, &fte->rule);
         fte_free(fte);
     }
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
     classifier_destroy(cls);
 }
 
@@ -2277,9 +2277,9 @@ fte_insert(struct classifier *cls, const struct match *match,
     cls_rule_init(&fte->rule, match, priority);
     fte->versions[index] = version;
 
-    ovs_rwlock_wrlock(&cls->rwlock);
+    fat_rwlock_wrlock(&cls->rwlock);
     old = fte_from_cls_rule(classifier_replace(cls, &fte->rule));
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
     if (old) {
         fte_version_free(old->versions[index]);
         fte->versions[!index] = old->versions[!index];
@@ -2490,7 +2490,7 @@ ofctl_replace_flows(int argc OVS_UNUSED, char *argv[])
     list_init(&requests);
 
     /* Delete flows that exist on the switch but not in the file. */
-    ovs_rwlock_rdlock(&cls.rwlock);
+    fat_rwlock_rdlock(&cls.rwlock);
     cls_cursor_init(&cursor, &cls, NULL);
     CLS_CURSOR_FOR_EACH (fte, rule, &cursor) {
         struct fte_version *file_ver = fte->versions[FILE_IDX];
@@ -2514,7 +2514,7 @@ ofctl_replace_flows(int argc OVS_UNUSED, char *argv[])
             fte_make_flow_mod(fte, FILE_IDX, OFPFC_ADD, protocol, &requests);
         }
     }
-    ovs_rwlock_unlock(&cls.rwlock);
+    fat_rwlock_unlock(&cls.rwlock);
     transact_multiple_noreply(vconn, &requests);
     vconn_close(vconn);
 
@@ -2556,7 +2556,7 @@ ofctl_diff_flows(int argc OVS_UNUSED, char *argv[])
     ds_init(&a_s);
     ds_init(&b_s);
 
-    ovs_rwlock_rdlock(&cls.rwlock);
+    fat_rwlock_rdlock(&cls.rwlock);
     cls_cursor_init(&cursor, &cls, NULL);
     CLS_CURSOR_FOR_EACH (fte, rule, &cursor) {
         struct fte_version *a = fte->versions[0];
@@ -2576,7 +2576,7 @@ ofctl_diff_flows(int argc OVS_UNUSED, char *argv[])
             }
         }
     }
-    ovs_rwlock_unlock(&cls.rwlock);
+    fat_rwlock_unlock(&cls.rwlock);
 
     ds_destroy(&a_s);
     ds_destroy(&b_s);
-- 
1.7.10.4
