Signed-off-by: Daniele Di Proietto <[email protected]>
---
 lib/automake.mk        |   2 +
 lib/ovs-thread-stats.c | 393 +++++++++++++++++++++++++++++++++++++++++++++++++
 lib/ovs-thread-stats.h | 236 +++++++++++++++++++++++++++++
 lib/ovs-thread.c       |   8 +-
 4 files changed, 638 insertions(+), 1 deletion(-)
 create mode 100644 lib/ovs-thread-stats.c
 create mode 100644 lib/ovs-thread-stats.h

diff --git a/lib/automake.mk b/lib/automake.mk
index 19fdc2b..8c99d38 100644
--- a/lib/automake.mk
+++ b/lib/automake.mk
@@ -157,6 +157,8 @@ lib_libopenvswitch_la_SOURCES = \
        lib/ovs-atomic.h \
        lib/ovs-rcu.c \
        lib/ovs-rcu.h \
+       lib/ovs-thread-stats.c \
+       lib/ovs-thread-stats.h \
        lib/ovs-thread.c \
        lib/ovs-thread.h \
        lib/ovsdb-data.c \
diff --git a/lib/ovs-thread-stats.c b/lib/ovs-thread-stats.c
new file mode 100644
index 0000000..fcf3279
--- /dev/null
+++ b/lib/ovs-thread-stats.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright (c) 2014 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+#include "ovs-thread-stats.h"
+
+#include "ovs-atomic.h"
+#include "ovs-rcu.h"
+#include "rcu-slist.h"
+
+/* Thread local statistics counters.
+ * =================================
+ *
+ * Thread local storage has two important limitations that prevent us from
+ * using it directly to implement thread local statistics:
+ *
+ * - Memory cannot be dynamically allocated
+ * - Cross thread access might be restricted
+ *
+ * This module implements data structures to store dynamically-allocated
+ * thread local memory for statistics and to preserve cross thread access.
+ *
+ *
+ * Implementation details
+ * ----------------------
+ *
+ * ovs_stats_thread_ptr (thread-local)
+ *          |
+ *          |
+ *          V                                  (next_thread)
+ *  struct thread_stats --(RCU-protected)--> struct thread_stats --> ...
+ *          |
+ *          |
+ *   (RCU-protected)
+ *          |
+ *          |
+ *          V
+ * struct ovsthread_stats_table { n_rows, row[0], row[1],...row[n_rows - 1] }
+ *                                          |
+ *                 -------------------------
+ *                |
+ *                V
+ * struct ovsthread_stats_row{ buckets[0], buckets[1],... }
+ *
+ * Each thread has its own 'struct thread_stats'. These structures are part of
+ * an RCU-protected list, to enable cross-thread access.
+ *
+ * Each 'struct thread_stats' has an RCU protected pointer to a 'struct
+ * ovsthread_stats_table', which is a variable-length array of pointers to the
+ * rows.
+ *
+ * Each row(struct ovsthread_stats_row) contains
+ * OVSTHREAD_STATS_BLOCK_N_BUCKETS buckets.
+ *
+ * Each bucket contains some space for the stats data (which must be written
+ * only by one thread), a counter (which is used by readers to ensure that they
+ * are reading consistent data) and a 'clear' flag, which can be set by any
+ * thread.
+ *
+ * Expansion
+ * ---------
+ *
+ * This module must adapt (at runtime) to two kinds of changes:
+ *
+ * - thread creation/destruction
+ * - bucket creation/destruction
+ *
+ * Thread creation/destruction is detected by two hooks in ovs-thread.c. For
+ * each new thread, we allocate a new 'struct thread_stats', put a pointer in
+ * 'ovs_stats_thread_ptr' and add it to 'thread_stats_list'.
+ * When the thread is destroyed, it probably contains useful statistics data.
+ * Therefore we never remove its 'struct thread_stats' from
+ * 'thread_stats_list', but we put it also on 'unused_thread_stats_list', ready
+ * to be recycled for the next thread that will be created.
+ *
+ * When OVSTHREAD_STATS_BLOCK_N_BUCKETS buckets have been created, we need to
+ * allocate a new row in each thread table. This is done by atomically changing
+ * the pointer to 'struct ovsthread_stats_table' (for each thread); the
+ * existing buckets memory remains the same.
+ *
+ * Since the creation of threads/buckets can happen concurrently, this race
+ * condition can happen:
+ * - A new thread is created. We allocate a new 'struct thread_stats' with
+ *   enough memory for N buckets.
+ * - A new bucket is requested. We expand each thread 'struct thread_stats'
+ *   (iterating through 'thread_stats_list') to contain N+1 buckets.
+ * - The new 'struct thread_stats' is inserted into 'thread_stats_list'.
+ *
+ * The new thread will have space for N buckets, while there are N+1 buckets in
+ * use. We solve this race condition by eventually expanding each
+ * 'struct thread_stats' on demand (while this is quite expensive, it should
+ * not happen often).
+ */
+
+
+/* Id generation
+ * =============
+ *
+ * This implements a mechanism useful for generating small identifiers that can
+ * be useful as array indexes. It consists of three functions:
+ *
+ * - newid() to generate a new identifier
+ * - delid() to delete a previously generated identifier
+ * - maxid() to get the current biggest id generated so far
+ *
+ * The generated identifiers are as small as possible: a list is used to
+ * recycle unused identifiers.
+ *
+ * Thread safety
+ * -------------
+ *
+ * Atomic operations (and RCU) are used. newid() and delid() can be called
+ * concurrently by multiple threads
+ */
+
+struct free_id_list {
+    int id;
+    struct rcu_slist list_node;
+};
+
+struct id_generation {
+    struct rcu_slist free_id_list;
+    ATOMIC(int) min_free_id;
+};
+
+#define ID_GENERATION_INITIALIZER \
+    { RCU_SLIST_INIT_NULL, ATOMIC_VAR_INIT(0) }
+
+/* Obtains an identifier from 'idgen' and stores it in '*id'.  Identifiers
+ * previously released with delid() are recycled first; otherwise a brand
+ * new identifier is generated.
+ *
+ * Returns true if a brand new (never used before) identifier was generated,
+ * false if a recycled one was returned. */
+static bool
+newid(struct id_generation *idgen, int *id)
+{
+    struct rcu_slist *node;
+    int ret;
+
+    node = rcu_slist_pop(&idgen->free_id_list);
+
+    if (node) {
+        struct free_id_list *elem;
+
+        elem = CONTAINER_OF(node, struct free_id_list, list_node);
+        /* Read 'elem->id' before scheduling the deallocation.  The element
+         * stays valid until a grace period elapses anyway, but reading it
+         * first makes the lifetime easier to reason about. */
+        *id = elem->id;
+        ovsrcu_postpone(free, elem);
+
+        return false;
+    }
+
+    /* atomic_add() stores the previous value in 'ret': the smallest id that
+     * has never been handed out. */
+    atomic_add(&idgen->min_free_id, 1, &ret);
+
+    *id = ret;
+    return true;
+}
+
+/* Returns 'id' to 'idgen' so that a later newid() call can recycle it. */
+static void
+delid(struct id_generation *idgen, int id)
+{
+    struct free_id_list *node;
+
+    node = xmalloc(sizeof *node);
+    node->id = id;
+    rcu_slist_push(&idgen->free_id_list, &node->list_node);
+}
+
+/* Returns the biggest identifier generated so far by 'idgen' (whether in
+ * use or recycled), or -1 if no identifier has ever been generated. */
+static int
+maxid(struct id_generation *idgen)
+{
+    int min_free;
+
+    atomic_read_explicit(&idgen->min_free_id, &min_free,
+                         memory_order_relaxed);
+    return min_free - 1;
+}
+
+static struct id_generation bucket_id_generation = ID_GENERATION_INITIALIZER;
+
+static struct ovsthread_stats_row main_thread_first_row;
+
+static struct ovsthread_stats_table main_thread_stats_table = {
+    1,
+    {&main_thread_first_row}
+};
+
+static struct thread_stats main_thread_stats = {
+    RCU_SLIST_INIT_NULL,
+    RCU_SLIST_INIT_NULL,
+    OVSRCU_TYPE_INITIALIZER(&main_thread_stats_table)
+};
+
+static struct rcu_slist thread_stats_list
+    = RCU_SLIST_INITIALIZER(&main_thread_stats.list_node);
+static struct rcu_slist unused_thread_stats_list;
+
+DEFINE_EXTERN_PER_THREAD_DATA(ovs_stats_thread_ptr, &main_thread_stats);
+
+/* Allocates and returns a new 'struct thread_stats' with a table big enough
+ * to hold every bucket id up to and including 'max_id'.
+ *
+ * 'max_id' may be -1 (no bucket created yet): a table with a single empty
+ * row is allocated in that case.
+ *
+ * Renamed the parameter from 'maxid': it shadowed the maxid() function
+ * above. */
+static struct thread_stats *
+alloc_ts(int max_id)
+{
+    struct thread_stats *new_ts;
+    struct ovsthread_stats_table *new_tbl;
+    size_t i, n_rows = OVSTHREAD_STATS_ID_TO_ROW(max_id) + 1;
+
+    new_tbl = xmalloc(OVSTHREAD_STATS_TABLE_SIZE(n_rows));
+
+    for (i = 0; i < n_rows; i++) {
+        new_tbl->rows[i] = xzalloc_cacheline(sizeof *new_tbl->rows[i]);
+    }
+    new_tbl->n_rows = n_rows;
+
+    new_ts = xzalloc(sizeof *new_ts);
+    /* The table is not visible to other threads yet, hence "hidden". */
+    ovsrcu_set_hidden(&new_ts->tbl, new_tbl);
+
+    return new_ts;
+}
+
+/* Per-thread initialization hook, called from ovsthread_wrapper() for each
+ * newly created thread.  Sets up the thread-local 'struct thread_stats',
+ * recycling one from a previously destroyed thread when possible. */
+void
+ovsthread_stats_create_thread(void)
+{
+    struct rcu_slist *node;
+    struct thread_stats *ts;
+
+    node = rcu_slist_pop(&unused_thread_stats_list);
+    if (node) {
+        /* A thread exited earlier: reuse its stats structure.  It is still
+         * linked on 'thread_stats_list'. */
+        ts = CONTAINER_OF(node, struct thread_stats, unused_list_node);
+    } else {
+        ts = alloc_ts(maxid(&bucket_id_generation));
+        /* Race condition.  If a new bucket is created here, we have not
+         * seen it, and the thread creating the bucket has not seen us.  We
+         * work around that:
+         *
+         * - during aggregation: we skip threads that do not have that bucket
+         * - during stats updates: if buckets are not there we create them.
+         */
+        rcu_slist_push(&thread_stats_list, &ts->list_node);
+    }
+
+    *ovs_stats_thread_ptr_get() = ts;
+}
+
+/* Per-thread destruction hook, called when a thread exits.  The statistics
+ * of a dead thread may still be meaningful, so its 'struct thread_stats'
+ * stays on 'thread_stats_list'; pushing it on 'unused_thread_stats_list'
+ * lets a future thread recycle it (see ovsthread_stats_create_thread()). */
+void
+ovsthread_stats_destroy_thread(void)
+{
+    struct thread_stats *ts = *ovs_stats_thread_ptr_get();
+
+    rcu_slist_push(&unused_thread_stats_list, &ts->unused_list_node);
+}
+
+/* Expands the stats table of 'ts' so that it contains 'row'.  No-op if the
+ * row is already allocated.  Safe to call from any thread: the table
+ * pointer is swapped atomically, so concurrent RCU readers always see a
+ * consistent (old or new) table. */
+void
+ovsthread_stats_ts_expand(struct thread_stats *ts, size_t row)
+{
+    struct ovsthread_stats_table *tbl, *new_tbl = NULL;
+    struct ovsthread_stats_row *new_row = NULL;
+    size_t n_rows = row + 1;
+    size_t newsize;
+
+    tbl = ovsrcu_get(struct ovsthread_stats_table *, &ts->tbl);
+
+    do {
+        /* Free the allocations of a previous (failed) iteration, using the
+         * deallocators that match the allocators below: the row comes from
+         * xzalloc_cacheline(), the table from xzalloc(). */
+        free_cacheline(new_row);
+        free(new_tbl);
+
+        if (row < tbl->n_rows && tbl->rows[row]) {
+            break;
+        }
+
+        /* The new table must hold 'row' without dropping any existing row:
+         * 'row' may point into a NULL hole below 'tbl->n_rows'. */
+        newsize = MAX(n_rows, tbl->n_rows);
+        new_tbl = xzalloc(OVSTHREAD_STATS_TABLE_SIZE(newsize));
+        memcpy(new_tbl, tbl, OVSTHREAD_STATS_TABLE_SIZE(tbl->n_rows));
+        new_tbl->n_rows = newsize;
+
+        new_row = xzalloc_cacheline(sizeof(*new_row));
+        new_tbl->rows[row] = new_row;
+
+        /* On failure 'tbl' is updated to the current table and we retry.
+         * NOTE(review): on success the old table structure is not freed; it
+         * may be the static main-thread table and its rows are shared with
+         * the new table, so it cannot simply be freed here. */
+    } while (!ovsrcu_compare_exchange_strong(&ts->tbl, &tbl, new_tbl));
+}
+
+/* Creates a new statistics bucket (one copy per thread) and returns its
+ * identifier, to be passed to the other ovsthread_stats_*() functions. */
+int
+ovsthread_stats_create_bucket(void)
+{
+    int ret;
+
+    if (newid(&bucket_id_generation, &ret)) {
+        /* Brand new id: it may fall in a row that no thread has allocated
+         * yet, so make sure every thread's table contains it. */
+        /* Probably need to allocate new buckets */
+        struct thread_stats *ts;
+        size_t row = OVSTHREAD_STATS_ID_TO_ROW(ret);
+
+        RCU_SLIST_FOR_EACH(ts, list_node, &thread_stats_list) {
+            ovsthread_stats_ts_expand(ts, row);
+        }
+    }
+
+    return ret;
+}
+
+/* Destroys bucket 'id': the bucket is cleared and its identifier recycled
+ * for a future ovsthread_stats_create_bucket().  The bucket memory itself
+ * is reused, never freed. */
+void
+ovsthread_stats_destroy_bucket(int id)
+{
+    ovsthread_stats_clear(id);
+    delid(&bucket_id_generation, id);
+}
+
+/* Reads the seqlock-style counter of 'b' with acquire semantics, so that
+ * reads of the bucket data issued afterwards are not reordered before it. */
+static uint32_t
+read_counter(struct ovsthread_stats_bucket *b)
+{
+    uint32_t counter;
+
+    atomic_read_explicit(&b->counter, &counter, memory_order_acquire);
+    return counter;
+}
+
+/* Spins until the counter of 'b' is even, i.e. until no writer is in the
+ * middle of an update, and returns that (even) value. */
+static uint32_t
+read_even_counter(struct ovsthread_stats_bucket *b)
+{
+    uint32_t counter;
+
+    do {
+        counter = read_counter(b);
+    } while (OVS_UNLIKELY(counter & 1));
+
+    return counter;
+}
+
+/* Returns true if the counter of 'b' differs from 'c', i.e. if a writer
+ * started an update after 'c' was sampled. */
+static bool
+counter_changed(struct ovsthread_stats_bucket *b, uint32_t c)
+{
+    return OVS_UNLIKELY(read_counter(b) != c);
+}
+
+/* Invokes 'aggr_cb' with 'aux' on a consistent snapshot of each thread's
+ * copy of bucket 'id', letting the callback combine the per-thread values
+ * into a single result.
+ *
+ * Threads whose table does not (yet) contain the bucket, and buckets marked
+ * as cleared, are skipped. */
+void
+ovsthread_stats_aggregate(int id, void *aux, aggregate_callback aggr_cb)
+{
+    struct thread_stats *ts;
+    size_t row = OVSTHREAD_STATS_ID_TO_ROW(id);
+    size_t col = OVSTHREAD_STATS_ID_TO_COL(id);
+
+    RCU_SLIST_FOR_EACH(ts, list_node, &thread_stats_list) {
+        struct ovsthread_stats_bucket copy;
+        struct ovsthread_stats_table *tbl;
+        struct ovsthread_stats_bucket *b;
+        uint32_t seq;
+
+        tbl = ovsrcu_get(struct ovsthread_stats_table *, &ts->tbl);
+
+        /* A row index within bounds may still be a NULL hole: expansion
+         * only allocates the requested row. */
+        if (row >= tbl->n_rows || !tbl->rows[row]) {
+            continue;
+        }
+
+        b = &tbl->rows[row]->buckets[col];
+        /* Seqlock-style read: retry until a snapshot is taken with no
+         * writer active (even counter, unchanged across the copy). */
+        do {
+            seq = read_even_counter(b);
+            copy = *b;
+        } while (counter_changed(b, seq));
+
+        if (!copy.clear) {
+            aggr_cb(aux, &copy);
+        }
+    }
+}
+
+/* Marks every thread's copy of bucket 'id' as cleared.  The owning thread
+ * zeroes the data lazily, the next time it writes the bucket.
+ *
+ * NOTE(review): buckets are cleared one thread at a time, so a concurrent
+ * aggregation may observe a partially cleared view (see "Restrictions" in
+ * the header). */
+void
+ovsthread_stats_clear(int id)
+{
+    struct thread_stats *ts;
+    size_t row = OVSTHREAD_STATS_ID_TO_ROW(id);
+    size_t col = OVSTHREAD_STATS_ID_TO_COL(id);
+
+    RCU_SLIST_FOR_EACH(ts, list_node, &thread_stats_list) {
+        struct ovsthread_stats_table *tbl;
+        struct ovsthread_stats_bucket *b;
+
+        tbl = ovsrcu_get(struct ovsthread_stats_table *, &ts->tbl);
+
+        /* Skip threads that never allocated this bucket: the row may be
+         * out of bounds or a NULL hole left by expansion. */
+        if (row >= tbl->n_rows || !tbl->rows[row]) {
+            continue;
+        }
+
+        b = &tbl->rows[row]->buckets[col];
+
+        b->clear = true;
+    }
+}
diff --git a/lib/ovs-thread-stats.h b/lib/ovs-thread-stats.h
new file mode 100644
index 0000000..16d0a55
--- /dev/null
+++ b/lib/ovs-thread-stats.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2014 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OVS_THREAD_STATS_H
+#define OVS_THREAD_STATS_H 1
+
+#include "ovs-thread.h"
+#include "ovs-rcu.h"
+#include "rcu-slist.h"
+
+/* Thread local lockless statistics counters
+ * =========================================
+ *
+ * Problem: taking a mutex for updating statistics counters is expensive and
+ * reduces scalability.
+ *
+ * Solution: use thread local statistics counter.
+ *
+ * This module allocates buckets and provides facilities for updating (by
+ * writing only to thread local memory) and reading (by iterating through all
+ * the thread local structures which belong to the same bucket) statistics.
+ * ovsthread_stats_create_bucket() returns an integer, which should be used to
+ * update/get/destroy the bucket.
+ *
+ * Usage
+ * -----
+ *
+ * struct flow
+ * {
+ *     ...
+ *     int statsid;
+ *     ...
+ * }
+ *
+ * struct flow_stats
+ * {
+ *     uint64_t bytes;
+ *     uint64_t packets;
+ *     uint64_t timestamp;
+ *     uint16_t flags;
+ * }
+ *
+ *Initialization*
+ *    flow->statsid = ovsthread_stats_create_bucket();
+ *
+ *Deinitialization*
+ *    ovsthread_stats_destroy_bucket(flow->statsid);
+ *
+ *Writing*
+ *     struct ovsthread_stats_bucket *bucket;
+ *     struct flow_stats *stats;
+ *     uint32_t seq;
+ *
+ *     bucket = ovsthread_stats_get_bucket(dp->stats, &seq);
+ *     stats = OVSTHREAD_STATS_BUCKET_CAST(struct flow_stats *, bucket);
+ *
+ *     stats->bytes += new_bytes;
+ *     stats->packets += new_packets;
+ *     stats->timestamp = MAX(stats->timestamp, now());
+ *     stats->flags |= new_flags;
+ *
+ *     ovsthread_stats_bucket_done(bucket, seq);
+ *
+ *Reading*
+ * static void
+ * flow_stats_aggr_cb(void *aux, struct ovsthread_stats_bucket *b)
+ * {
+ *     struct flow_stats *dst = (struct flow_stats *) aux;
+ *     struct flow_stats *src
+ *         = OVSTHREAD_STATS_BUCKET_CAST(struct flow_stats *, b);
+ *     dst->bytes += src->bytes;
+ *     dst->packets += src->packets;
+ *     dst->timestamp = MAX(dst->timestamp, src->timestamp);
+ *     dst->flags |= src->flags;
+ * }
+ *
+ *     struct flow_stats dst;
+ *
+ *     memset(&dst, 0, sizeof(dst));
+ *     ovsthread_stats_aggregate(flow->statsid, &dst, flow_stats_aggr_cb);
+ *
+ *     VLOG_INFO("bytes:%"PRIu64", packets:%"PRIu64, dst.bytes, dst.packets);
+ *
+ *Resetting*
+ *
+ *     ovsthread_stats_clear(flow->statsid);
+ *
+ * Thread safety
+ * -------------
+ *
+ * Atomic operations (and RCU) are used for creating and destroying buckets.
+ * Each thread writes only to its own copy of a bucket (it is read by
+ * multiple threads during aggregation). An atomic counter is used by readers
+ * to be sure that they've read consistent data.
+ *
+ * Restrictions
+ * ------------
+ * - ovsthread_stats_clear() clears thread-local buckets one at a time. A
+ *   concurrent reader might report an inconsistent view.
+ * - ovsthread_stats creates fixed-size buckets with enough space to store
+ *   OVSTHREAD_STATS_BUCKET_DATA_SIZE bytes of data.
+ * - the buckets memory is recycled but never freed.
+ */
+
+
+#ifdef  __cplusplus
+extern "C" {
+#endif
+
+/* Number of uint64_t slots available for user data in a bucket: one cache
+ * line minus the space taken by the 'counter' and 'clear' fields. */
+#define OVSTHREAD_STATS_BUCKET_DATA_SIZE_U64 \
+    ((CACHE_LINE_SIZE - 2*sizeof(uint32_t))/sizeof(uint64_t))
+/* The same user-data space, expressed in bytes. */
+#define OVSTHREAD_STATS_BUCKET_DATA_SIZE \
+    (OVSTHREAD_STATS_BUCKET_DATA_SIZE_U64 * sizeof(uint64_t))
+/* One per-thread copy of a statistics bucket.  Sized to exactly one cache
+ * line (see BUILD_ASSERT_DECL below). */
+struct ovsthread_stats_bucket
+{
+    /* User data; access it through OVSTHREAD_STATS_BUCKET_CAST(). */
+    uint64_t data[OVSTHREAD_STATS_BUCKET_DATA_SIZE_U64];
+
+    /* Allows readers to track in-progress changes.  Initially zero, each
+     * writer increments this value just before and just after each change.
+     * Thus, a reader can ensure that it gets a consistent
+     * snapshot by waiting for the counter to become even, then checking that
+     * its value does not change while examining the bucket. */
+    atomic_uint32_t counter;
+    /* Are these stats cleared? */
+    bool clear;
+};
+BUILD_ASSERT_DECL(sizeof(struct ovsthread_stats_bucket) == CACHE_LINE_SIZE);
+
+#define OVSTHREAD_STATS_BLOCK_N_BUCKETS 65536
+
+struct ovsthread_stats_row
+{
+    struct ovsthread_stats_bucket buckets[OVSTHREAD_STATS_BLOCK_N_BUCKETS];
+};
+
+#define OVSTHREAD_STATS_ID_TO_ROW(ID) \
+    ((ID)/OVSTHREAD_STATS_BLOCK_N_BUCKETS)
+
+#define OVSTHREAD_STATS_ID_TO_COL(ID) \
+    ((ID)%OVSTHREAD_STATS_BLOCK_N_BUCKETS)
+
+/* 'rows' is a variable length array and 'n_rows' contains its size.  An
+ * entry in 'rows' may be NULL if that row has not been allocated yet. */
+struct ovsthread_stats_table
+{
+    size_t n_rows;
+    struct ovsthread_stats_row *rows[1];
+};
+
+/* Number of bytes needed for a table with 'N_ROWS' rows.  The argument is
+ * parenthesized so that expressions such as 'row + 1' expand correctly. */
+#define OVSTHREAD_STATS_TABLE_SIZE(N_ROWS) \
+(sizeof(struct ovsthread_stats_table) - sizeof(struct ovsthread_stats_row *) \
+    + (N_ROWS) * sizeof(struct ovsthread_stats_row *))
+
+struct thread_stats
+{
+    struct rcu_slist list_node;
+    struct rcu_slist unused_list_node;
+    OVSRCU_TYPE(struct ovsthread_stats_table *) tbl;
+};
+
+DECLARE_EXTERN_PER_THREAD_DATA(struct thread_stats *, ovs_stats_thread_ptr);
+
+void ovsthread_stats_create_thread(void);
+void ovsthread_stats_destroy_thread(void);
+
+int ovsthread_stats_create_bucket(void);
+void ovsthread_stats_destroy_bucket(int id);
+
+void ovsthread_stats_ts_expand(struct thread_stats *, size_t row);
+
+/* Returns a pointer to the current thread's copy of bucket 'id' and begins
+ * a write section: the caller must update the bucket data and then call
+ * ovsthread_stats_bucket_done(), passing the value stored in '*c'.
+ *
+ * Writing is lock free: only the owning thread writes its copy, while the
+ * odd/even counter lets concurrent readers detect in-progress updates. */
+static inline struct ovsthread_stats_bucket *
+ovsthread_stats_get_bucket(int id, uint32_t *c)
+{
+    struct thread_stats *ts = *ovs_stats_thread_ptr_get();
+    struct ovsthread_stats_table *tbl;
+    struct ovsthread_stats_bucket *ret;
+
+    size_t row = OVSTHREAD_STATS_ID_TO_ROW(id);
+    size_t col = OVSTHREAD_STATS_ID_TO_COL(id);
+
+    tbl = ovsrcu_get(struct ovsthread_stats_table *, &ts->tbl);
+
+    if (OVS_UNLIKELY(row >= tbl->n_rows || !tbl->rows[row])) {
+        /* If this thread has been created while a new bucket (that required
+         * a new row) was being added, we might not have the right number of
+         * rows. This is a very unlikely race condition, which we solve by
+         * creating the rows now.  The expansion installs a new table, so
+         * the table pointer must be re-read afterwards. */
+        ovsthread_stats_ts_expand(ts, row);
+        tbl = ovsrcu_get(struct ovsthread_stats_table *, &ts->tbl);
+    }
+
+    ret = &(tbl->rows[row]->buckets[col]);
+
+    /* Make the counter odd: readers retry until the matching
+     * ovsthread_stats_bucket_done() makes it even again. */
+    atomic_read_explicit(&ret->counter, c, memory_order_acquire);
+    atomic_store_explicit(&ret->counter, *c + 1, memory_order_release);
+
+    if (ret->clear) {
+        /* Someone requested a clear: zero the data lazily, now that we are
+         * the only writer. */
+        memset(&ret->data, 0, sizeof ret->data);
+        ret->clear = false;
+    }
+
+    return ret;
+}
+
+/* Ends a write section started by ovsthread_stats_get_bucket(): 'c' must be
+ * the counter value stored there.  Storing 'c + 2' makes the counter even
+ * again (get_bucket stored 'c + 1'), letting readers proceed. */
+static inline void
+ovsthread_stats_bucket_done(struct ovsthread_stats_bucket *b, uint32_t c)
+{
+    atomic_store_explicit(&b->counter, c + 2, memory_order_release);
+}
+
+typedef void(*aggregate_callback)(void *aux, struct ovsthread_stats_bucket *);
+
+void ovsthread_stats_aggregate(int id, void *aux, aggregate_callback aggr_cb);
+
+void ovsthread_stats_clear(int id);
+
+#define OVSTHREAD_STATS_BUCKET_CAST(TYPE, BUCKET) \
+    (BUILD_ASSERT(sizeof (*((TYPE)NULL)) <= OVSTHREAD_STATS_BUCKET_DATA_SIZE),\
+     (TYPE) CONST_CAST(struct ovsthread_stats_bucket *, BUCKET))
+
+#ifdef  __cplusplus
+}
+#endif
+
+#endif /* OVS_THREAD_STATS_H */
diff --git a/lib/ovs-thread.c b/lib/ovs-thread.c
index e2c3971..ac014f5 100644
--- a/lib/ovs-thread.c
+++ b/lib/ovs-thread.c
@@ -27,6 +27,7 @@
 #include "hash.h"
 #include "netdev-dpdk.h"
 #include "ovs-rcu.h"
+#include "ovs-thread-stats.h"
 #include "poll-loop.h"
 #include "seq.h"
 #include "socket-util.h"
@@ -321,6 +322,7 @@ ovsthread_wrapper(void *aux_)
     struct ovsthread_aux *auxp = aux_;
     struct ovsthread_aux aux;
     unsigned int id;
+    void *ret;
 
     id = atomic_count_inc(&next_id);
     *ovsthread_id_get() = id;
@@ -334,8 +336,12 @@ ovsthread_wrapper(void *aux_)
     ovsrcu_quiesce_end();
 
     thread_set_nonpmd();
+    ovsthread_stats_create_thread();
 
-    return aux.start(aux.arg);
+    ret = aux.start(aux.arg);
+
+    ovsthread_stats_destroy_thread();
+    return ret;
 }
 
 /* Starts a thread that calls 'start(arg)'.  Sets the thread's name to 'name'
-- 
2.1.0.rc1

_______________________________________________
dev mailing list
[email protected]
http://openvswitch.org/mailman/listinfo/dev

Reply via email to