This is an automated email from the ASF dual-hosted git repository.

amc pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
     new a3dc6ad  Add active_thread container support to Event Processor. For #1997.
a3dc6ad is described below

commit a3dc6ad881671bdca22c0116bdae5272cffbc3b9
Author: Alan M. Carroll <[email protected]>
AuthorDate: Mon Jun 5 21:08:54 2017 -0500

    Add active_thread container support to Event Processor.
    For #1997.
---
 iocore/eventsystem/I_EventProcessor.h |  45 +++++++++++--
 lib/records/RecRawStats.cc            | 123 +++++++++++++++++-----------------
 2 files changed, 102 insertions(+), 66 deletions(-)

diff --git a/iocore/eventsystem/I_EventProcessor.h b/iocore/eventsystem/I_EventProcessor.h
index e4554db..0825caf 100644
--- a/iocore/eventsystem/I_EventProcessor.h
+++ b/iocore/eventsystem/I_EventProcessor.h
@@ -30,15 +30,15 @@
 #include "I_Event.h"
 
 #ifdef TS_MAX_THREADS_IN_EACH_THREAD_TYPE
-const int MAX_THREADS_IN_EACH_TYPE = TS_MAX_THREADS_IN_EACH_THREAD_TYPE;
+constexpr int MAX_THREADS_IN_EACH_TYPE = TS_MAX_THREADS_IN_EACH_THREAD_TYPE;
 #else
-const int MAX_THREADS_IN_EACH_TYPE = 3072;
+constexpr int MAX_THREADS_IN_EACH_TYPE = 3072;
 #endif
 
 #ifdef TS_MAX_NUMBER_EVENT_THREADS
-const int MAX_EVENT_THREADS = TS_MAX_NUMBER_EVENT_THREADS;
+constexpr int MAX_EVENT_THREADS = TS_MAX_NUMBER_EVENT_THREADS;
 #else
-const int MAX_EVENT_THREADS = 4096;
+constexpr int MAX_EVENT_THREADS = 4096;
 #endif
 
 class EThread;
@@ -295,7 +295,6 @@ public:
   */
   int n_thread_groups = 0;
 
-public:
   /*------------------------------------------------------*\
   | Unix & non NT Interface                                |
   \*------------------------------------------------------*/
@@ -307,6 +306,42 @@ public:
   volatile int n_dthreads       = 0; // No. of dedicated threads
   volatile int thread_data_used = 0;
   ink_mutex dedicated_spawn_thread_mutex;
+
+  /// Provide container style access to just the active threads, not the entire array.
+  class active_threads_type
+  {
+    using iterator = EThread *const *; ///< Internal iterator type, pointer to array element.
+  public:
+    iterator
+    begin() const
+    {
+      return _begin;
+    }
+    iterator
+    end() const
+    {
+      return _end;
+    }
+
+  private:
+    iterator _begin; ///< Start of threads.
+    iterator _end;   ///< End of threads.
+    /// Construct from base of the array (@a start) and the current valid count (@a n).
+    active_threads_type(iterator start, int n) : _begin(start), _end(start + n) {}
+    friend class EventProcessor;
+  };
+
+  // These can be used in container for loops and other range operations.
+  active_threads_type
+  active_ethreads() const
+  {
+    return {all_ethreads, n_ethreads};
+  }
+  active_threads_type
+  active_dthreads() const
+  {
+    return {all_dthreads, n_dthreads};
+  }
 };
 
 extern inkcoreapi class EventProcessor eventProcessor;
diff --git a/lib/records/RecRawStats.cc b/lib/records/RecRawStats.cc
index 224ce56..8a57019 100644
--- a/lib/records/RecRawStats.cc
+++ b/lib/records/RecRawStats.cc
@@ -23,16 +23,25 @@
 
 #include "P_RecCore.h"
 #include "P_RecProcess.h"
+#include <ts/MemView.h>
 
 //-------------------------------------------------------------------------
 // raw_stat_get_total
 //-------------------------------------------------------------------------
+
+namespace
+{
+// Commonly used access to a raw stat, avoid typos.
+inline RecRawStat *
+thread_stat(EThread *et, RecRawStatBlock *rsb, int id)
+{
+  return (reinterpret_cast<RecRawStat *>(reinterpret_cast<char *>(et) + rsb->ethr_stat_offset)) + id;
+}
+}
+
 static int
 raw_stat_get_total(RecRawStatBlock *rsb, int id, RecRawStat *total)
 {
-  int i;
-  RecRawStat *tlp;
-
   total->sum   = 0;
   total->count = 0;
 
@@ -41,14 +50,14 @@ raw_stat_get_total(RecRawStatBlock *rsb, int id, RecRawStat *total)
   total->count = rsb->global[id]->count;
 
   // get thread local values
-  for (i = 0; i < eventProcessor.n_ethreads; i++) {
-    tlp = ((RecRawStat *)((char *)(eventProcessor.all_ethreads[i]) + rsb->ethr_stat_offset)) + id;
+  for (EThread *et : eventProcessor.active_ethreads()) {
+    RecRawStat *tlp = thread_stat(et, rsb, id);
     total->sum += tlp->sum;
     total->count += tlp->count;
   }
 
-  for (i = 0; i < eventProcessor.n_dthreads; i++) {
-    tlp = ((RecRawStat *)((char *)(eventProcessor.all_dthreads[i]) + rsb->ethr_stat_offset)) + id;
+  for (EThread *et : eventProcessor.active_dthreads()) {
+    RecRawStat *tlp = thread_stat(et, rsb, id);
     total->sum += tlp->sum;
     total->count += tlp->count;
   }
@@ -66,22 +75,20 @@ raw_stat_get_total(RecRawStatBlock *rsb, int id, RecRawStat *total)
 static int
 raw_stat_sync_to_global(RecRawStatBlock *rsb, int id)
 {
-  int i;
-  RecRawStat *tlp;
   RecRawStat total;
 
   total.sum   = 0;
   total.count = 0;
 
   // sum the thread local values
-  for (i = 0; i < eventProcessor.n_ethreads; i++) {
-    tlp = ((RecRawStat *)((char *)(eventProcessor.all_ethreads[i]) + rsb->ethr_stat_offset)) + id;
+  for (EThread *et : eventProcessor.active_ethreads()) {
+    RecRawStat *tlp = thread_stat(et, rsb, id);
     total.sum += tlp->sum;
     total.count += tlp->count;
   }
 
-  for (i = 0; i < eventProcessor.n_dthreads; i++) {
-    tlp = ((RecRawStat *)((char *)(eventProcessor.all_dthreads[i]) + rsb->ethr_stat_offset)) + id;
+  for (EThread *et : eventProcessor.active_dthreads()) {
+    RecRawStat *tlp = thread_stat(et, rsb, id);
     total.sum += tlp->sum;
     total.count += tlp->count;
   }
@@ -91,27 +98,22 @@ raw_stat_sync_to_global(RecRawStatBlock *rsb, int id)
   }
 
   // lock so the setting of the globals and last values are atomic
-  ink_mutex_acquire(&(rsb->mutex));
+  {
+    ink_scoped_mutex_lock lock(rsb->mutex);
 
-  // get the delta from the last sync
-  RecRawStat delta;
-  delta.sum   = total.sum - rsb->global[id]->last_sum;
-  delta.count = total.count - rsb->global[id]->last_count;
+    // get the delta from the last sync
+    RecRawStat delta;
+    delta.sum   = total.sum - rsb->global[id]->last_sum;
+    delta.count = total.count - rsb->global[id]->last_count;
 
-  // This is too verbose now, so leaving it out / leif
-  // Debug("stats", "raw_stat_sync_to_global(): rsb pointer:%p id:%d delta:%" PRId64 " total:%" PRId64 " last:%" PRId64 " global:%"
-  // PRId64 "\n",
-  // rsb, id, delta.sum, total.sum, rsb->global[id]->last_sum, rsb->global[id]->sum);
+    // increment the global values by the delta
+    ink_atomic_increment(&(rsb->global[id]->sum), delta.sum);
+    ink_atomic_increment(&(rsb->global[id]->count), delta.count);
 
-  // increment the global values by the delta
-  ink_atomic_increment(&(rsb->global[id]->sum), delta.sum);
-  ink_atomic_increment(&(rsb->global[id]->count), delta.count);
-
-  // set the new totals as the last values seen
-  ink_atomic_swap(&(rsb->global[id]->last_sum), total.sum);
-  ink_atomic_swap(&(rsb->global[id]->last_count), total.count);
-
-  ink_mutex_release(&(rsb->mutex));
+    // set the new totals as the last values seen
+    ink_atomic_swap(&(rsb->global[id]->last_sum), total.sum);
+    ink_atomic_swap(&(rsb->global[id]->last_count), total.count);
+  }
 
   return REC_ERR_OKAY;
 }
@@ -126,23 +128,22 @@ raw_stat_clear(RecRawStatBlock *rsb, int id)
 
   // the globals need to be reset too
   // lock so the setting of the globals and last values are atomic
-  ink_mutex_acquire(&(rsb->mutex));
-  ink_atomic_swap(&(rsb->global[id]->sum), (int64_t)0);
-  ink_atomic_swap(&(rsb->global[id]->last_sum), (int64_t)0);
-  ink_atomic_swap(&(rsb->global[id]->count), (int64_t)0);
-  ink_atomic_swap(&(rsb->global[id]->last_count), (int64_t)0);
-  ink_mutex_release(&(rsb->mutex));
-
+  {
+    ink_scoped_mutex_lock lock(rsb->mutex);
+    ink_atomic_swap(&(rsb->global[id]->sum), (int64_t)0);
+    ink_atomic_swap(&(rsb->global[id]->last_sum), (int64_t)0);
+    ink_atomic_swap(&(rsb->global[id]->count), (int64_t)0);
+    ink_atomic_swap(&(rsb->global[id]->last_count), (int64_t)0);
+  }
   // reset the local stats
-  RecRawStat *tlp;
-  for (int i = 0; i < eventProcessor.n_ethreads; i++) {
-    tlp = ((RecRawStat *)((char *)(eventProcessor.all_ethreads[i]) + rsb->ethr_stat_offset)) + id;
+  for (EThread *et : eventProcessor.active_ethreads()) {
+    RecRawStat *tlp = thread_stat(et, rsb, id);
     ink_atomic_swap(&(tlp->sum), (int64_t)0);
     ink_atomic_swap(&(tlp->count), (int64_t)0);
   }
 
-  for (int i = 0; i < eventProcessor.n_dthreads; i++) {
-    tlp = ((RecRawStat *)((char *)(eventProcessor.all_dthreads[i]) + rsb->ethr_stat_offset)) + id;
+  for (EThread *et : eventProcessor.active_dthreads()) {
+    RecRawStat *tlp = thread_stat(et, rsb, id);
     ink_atomic_swap(&(tlp->sum), (int64_t)0);
     ink_atomic_swap(&(tlp->count), (int64_t)0);
   }
@@ -160,20 +161,20 @@ raw_stat_clear_sum(RecRawStatBlock *rsb, int id)
 
   // the globals need to be reset too
   // lock so the setting of the globals and last values are atomic
-  ink_mutex_acquire(&(rsb->mutex));
-  ink_atomic_swap(&(rsb->global[id]->sum), (int64_t)0);
-  ink_atomic_swap(&(rsb->global[id]->last_sum), (int64_t)0);
-  ink_mutex_release(&(rsb->mutex));
+  {
+    ink_scoped_mutex_lock lock(rsb->mutex);
+    ink_atomic_swap(&(rsb->global[id]->sum), (int64_t)0);
+    ink_atomic_swap(&(rsb->global[id]->last_sum), (int64_t)0);
+  }
 
   // reset the local stats
-  RecRawStat *tlp;
-  for (int i = 0; i < eventProcessor.n_ethreads; i++) {
-    tlp = ((RecRawStat *)((char *)(eventProcessor.all_ethreads[i]) + rsb->ethr_stat_offset)) + id;
+  for (EThread *et : eventProcessor.active_ethreads()) {
+    RecRawStat *tlp = thread_stat(et, rsb, id);
     ink_atomic_swap(&(tlp->sum), (int64_t)0);
   }
 
-  for (int i = 0; i < eventProcessor.n_dthreads; i++) {
-    tlp = ((RecRawStat *)((char *)(eventProcessor.all_dthreads[i]) + rsb->ethr_stat_offset)) + id;
+  for (EThread *et : eventProcessor.active_dthreads()) {
+    RecRawStat *tlp = thread_stat(et, rsb, id);
     ink_atomic_swap(&(tlp->sum), (int64_t)0);
   }
 
@@ -190,20 +191,20 @@ raw_stat_clear_count(RecRawStatBlock *rsb, int id)
 
   // the globals need to be reset too
   // lock so the setting of the globals and last values are atomic
-  ink_mutex_acquire(&(rsb->mutex));
-  ink_atomic_swap(&(rsb->global[id]->count), (int64_t)0);
-  ink_atomic_swap(&(rsb->global[id]->last_count), (int64_t)0);
-  ink_mutex_release(&(rsb->mutex));
+  {
+    ink_scoped_mutex_lock lock(rsb->mutex);
+    ink_atomic_swap(&(rsb->global[id]->count), (int64_t)0);
+    ink_atomic_swap(&(rsb->global[id]->last_count), (int64_t)0);
+  }
 
   // reset the local stats
-  RecRawStat *tlp;
-  for (int i = 0; i < eventProcessor.n_ethreads; i++) {
-    tlp = ((RecRawStat *)((char *)(eventProcessor.all_ethreads[i]) + rsb->ethr_stat_offset)) + id;
+  for (EThread *et : eventProcessor.active_ethreads()) {
+    RecRawStat *tlp = thread_stat(et, rsb, id);
     ink_atomic_swap(&(tlp->count), (int64_t)0);
   }
 
-  for (int i = 0; i < eventProcessor.n_dthreads; i++) {
-    tlp = ((RecRawStat *)((char *)(eventProcessor.all_dthreads[i]) + rsb->ethr_stat_offset)) + id;
+  for (EThread *et : eventProcessor.active_dthreads()) {
+    RecRawStat *tlp = thread_stat(et, rsb, id);
     ink_atomic_swap(&(tlp->count), (int64_t)0);
   }
 

-- 
To stop receiving notification emails like this one, please contact
['"[email protected]" <[email protected]>'].

Reply via email to