This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit 2bddb6b072b05480cea68c290ecd14b62ae0aa00
Author: Alexey Serbin <[email protected]>
AuthorDate: Wed Aug 9 17:36:35 2023 -0700

    [util] small cleanup on debug-util
    
    Change-Id: I867e0cd8352cb8ee052019dc6ee32ae52143f0ff
    Reviewed-on: http://gerrit.cloudera.org:8080/20338
    Reviewed-by: Abhishek Chennaka <[email protected]>
    Tested-by: Alexey Serbin <[email protected]>
---
 src/kudu/util/debug-util.cc         | 11 ++++++-----
 src/kudu/util/debug-util.h          | 12 ++++++++----
 src/kudu/util/spinlock_profiling.cc | 22 +++++++++++-----------
 3 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/src/kudu/util/debug-util.cc b/src/kudu/util/debug-util.cc
index 3db8afd97..8c7f42f4b 100644
--- a/src/kudu/util/debug-util.cc
+++ b/src/kudu/util/debug-util.cc
@@ -727,7 +727,7 @@ uint64_t StackTrace::HashCode() const {
                                sizeof(frames_[0]) * num_frames_);
 }
 
-bool StackTrace::LessThan(const StackTrace& s) const {
+bool StackTrace::operator<(const StackTrace& s) const {
   return std::lexicographical_compare(frames_, &frames_[num_frames_],
                                       s.frames_, &s.frames_[num_frames_]);
 }
@@ -784,9 +784,10 @@ Status StackTraceSnapshot::SnapshotAllStacks() {
   }
   collectors_.clear();
 
-  std::sort(infos_.begin(), infos_.end(), [](const ThreadInfo& a, const ThreadInfo& b) {
-      return a.stack.LessThan(b.stack);
-    });
+  std::sort(infos_.begin(),
+            infos_.end(), [](const ThreadInfo& a, const ThreadInfo& b) {
+    return a.stack < b.stack; // NOLINT(build/include_what_you_use)
+  });
   return Status::OK();
 }
 
@@ -796,7 +797,7 @@ void StackTraceSnapshot::VisitGroups(const StackTraceSnapshot::VisitorFunc& visi
   while (group_end != infos_.end()) {
     do {
       ++group_end;
-    } while (group_end != infos_.end() && group_end->stack.Equals(group_start->stack));
+    } while (group_end != infos_.end() && group_end->stack == group_start->stack);
     visitor(ArrayView<ThreadInfo>(&*group_start, std::distance(group_start, group_end)));
     group_start = group_end;
   }
diff --git a/src/kudu/util/debug-util.h b/src/kudu/util/debug-util.h
index db4e8d1d0..ada2def77 100644
--- a/src/kudu/util/debug-util.h
+++ b/src/kudu/util/debug-util.h
@@ -144,14 +144,18 @@ class StackTrace {
   }
 
   // Returns true if the stack trace 's' matches this trace.
-  bool Equals(const StackTrace& s) const {
+  bool operator==(const StackTrace& s) const {
     return s.num_frames_ == num_frames_ &&
-      strings::memeq(frames_, s.frames_,
-                     num_frames_ * sizeof(frames_[0]));
+        strings::memeq(frames_, s.frames_, num_frames_ * sizeof(frames_[0]));
+  }
+
+  // Returns true if 's' doesn't match this trace.
+  bool operator!=(const StackTrace& s) const {
+    return !(*this == s);
   }
 
   // Comparison operator for use in sorting.
-  bool LessThan(const StackTrace& s) const;
+  bool operator<(const StackTrace& s) const;
 
   // Collect and store the current stack trace. Skips the top 'skip_frames' frames
   // from the stack. For example, a value of '1' will skip whichever function
diff --git a/src/kudu/util/spinlock_profiling.cc b/src/kudu/util/spinlock_profiling.cc
index a20f5ee70..9bfdf5603 100644
--- a/src/kudu/util/spinlock_profiling.cc
+++ b/src/kudu/util/spinlock_profiling.cc
@@ -142,32 +142,32 @@ Atomic32 g_profiling_enabled = 0;
 ContentionStacks* g_contention_stacks = nullptr;
 
 void ContentionStacks::AddStack(const StackTrace& s, int64_t cycles) {
-  uint64_t hash = s.HashCode();
+  const uint64_t hash = s.HashCode();
 
   // Linear probe up to 4 attempts before giving up
   for (int i = 0; i < kNumLinearProbeAttempts; i++) {
-    Entry* e = &entries_[(hash + i) % kNumEntries];
-    if (!e->lock.TryLock()) {
+    Entry& e = entries_[(hash + i) % kNumEntries];
+    if (!e.lock.TryLock()) {
       // If we fail to lock it, we can safely just use a different slot.
       // It's OK if a single stack shows up multiple times, because pprof
       // aggregates them in the end anyway.
       continue;
     }
 
-    if (e->trip_count == 0) {
+    if (e.trip_count == 0) {
       // It's an un-claimed slot. Claim it.
-      e->hash = hash;
-      e->trace.CopyFrom(s);
-    } else if (e->hash != hash || !e->trace.Equals(s)) {
+      e.hash = hash;
+      e.trace.CopyFrom(s);
+    } else if (e.hash != hash || e.trace != s) {
       // It's claimed by a different stack trace.
-      e->lock.Unlock();
+      e.lock.Unlock();
       continue;
     }
 
     // Contribute to the stats for this stack.
-    e->cycle_count += cycles;
-    e->trip_count++;
-    e->lock.Unlock();
+    e.cycle_count += cycles;
+    ++e.trip_count;
+    e.lock.Unlock();
     return;
   }
 

Reply via email to