dschneider-pivotal commented on a change in pull request #6430:
URL: https://github.com/apache/geode/pull/6430#discussion_r645704890



##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
##########
@@ -388,90 +375,99 @@ public void close(BucketRegion bucketRegion) {
     if (logger.isDebugEnabled()) {
       logger.debug("Clearing entries for {} rvv={}", _getOwner(), rvv);
     }
-    LocalRegion lr = _getOwner();
+    final LocalRegion lr = _getOwner();
     RegionVersionVector localRvv = lr.getVersionVector();
-    incClearCount(lr);
-    // lock for size calcs if the region might have tombstones
-    Object lockObj = lr.getConcurrencyChecksEnabled() ? lr.getSizeGuard() : 
new Object();
-    synchronized (lockObj) {
-      if (rvv == null) {
-        int delta = 0;
-        try {
-          delta = sizeInVM(); // TODO soplog need to determine if stats should
-                              // reflect only size in memory or the complete 
thing
-        } catch (GemFireIOException e) {
-          // ignore rather than throwing an exception during cache close
-        }
-        int tombstones = lr.getTombstoneCount();
-        _mapClear();
-        _getOwner().updateSizeOnClearRegion(delta - tombstones);
-        _getOwner().incTombstoneCount(-tombstones);
-        if (delta != 0) {
-          incEntryCount(-delta);
-        }
-      } else {
-        int delta = 0;
-        int tombstones = 0;
-        VersionSource myId = _getOwner().getVersionMember();
-        if (localRvv != rvv) {
-          localRvv.recordGCVersions(rvv);
-        }
-        final boolean isTraceEnabled = logger.isTraceEnabled();
-        for (RegionEntry re : regionEntries()) {
-          synchronized (re) {
-            Token value = re.getValueAsToken();
-            // if it's already being removed or the entry is being created we 
leave it alone
-            if (value == Token.REMOVED_PHASE1 || value == 
Token.REMOVED_PHASE2) {
-              continue;
-            }
 
-            VersionSource id = re.getVersionStamp().getMemberID();
-            if (id == null) {
-              id = myId;
-            }
-            if (rvv.contains(id, re.getVersionStamp().getRegionVersion())) {
-              if (isTraceEnabled) {
-                logger.trace("region clear op is removing {} {}", re.getKey(),
-                    re.getVersionStamp());
+    long startTime;
+    startTime = _getOwner().startClear();
+
+    try {
+      // lock for size calcs if the region might have tombstones
+      Object lockObj = lr.getConcurrencyChecksEnabled() ? lr.getSizeGuard() : 
new Object();
+      synchronized (lockObj) {
+        if (rvv == null) {
+          int delta = 0;
+          try {
+            delta = sizeInVM(); // TODO soplog need to determine if stats 
should
+            // reflect only size in memory or the complete thing
+          } catch (GemFireIOException e) {
+            // ignore rather than throwing an exception during cache close
+          }
+          int tombstones = lr.getTombstoneCount();
+          _mapClear();
+          _getOwner().updateSizeOnClearRegion(delta - tombstones);
+          _getOwner().incTombstoneCount(-tombstones);
+          if (delta != 0) {
+            incEntryCount(-delta);
+          }
+        } else {
+          int delta = 0;
+          int tombstones = 0;
+          VersionSource myId = _getOwner().getVersionMember();
+          if (localRvv != rvv) {
+            localRvv.recordGCVersions(rvv);
+          }
+          final boolean isTraceEnabled = logger.isTraceEnabled();
+          for (RegionEntry re : regionEntries()) {
+            synchronized (re) {
+              Token value = re.getValueAsToken();
+              // if it's already being removed or the entry is being created 
we leave it alone
+              if (value == Token.REMOVED_PHASE1 || value == 
Token.REMOVED_PHASE2) {
+                continue;
               }
 
-              boolean tombstone = re.isTombstone();
-              // note: it.remove() did not reliably remove the entry so we use 
remove(K,V) here
-              if (getEntryMap().remove(re.getKey(), re)) {
-                if (OffHeapClearRequired.doesClearNeedToCheckForOffHeap()) {
-                  GatewaySenderEventImpl.release(re.getValue()); // OFFHEAP 
_getValue ok
-                }
-                // If this is an overflow only region, we need to free the 
entry on
-                // disk at this point.
-                try {
-                  re.removePhase1(lr, true);
-                } catch (RegionClearedException e) {
-                  // do nothing, it's already cleared.
+              VersionSource id = re.getVersionStamp().getMemberID();
+              if (id == null) {
+                id = myId;
+              }
+              if (rvv.contains(id, re.getVersionStamp().getRegionVersion())) {
+                if (isTraceEnabled) {
+                  logger.trace("region clear op is removing {} {}", 
re.getKey(),
+                      re.getVersionStamp());
                 }
-                re.removePhase2();
-                lruEntryDestroy(re);
-                if (tombstone) {
-                  _getOwner().incTombstoneCount(-1);
-                  tombstones += 1;
-                } else {
-                  delta += 1;
+
+                boolean tombstone = re.isTombstone();
+                // note: it.remove() did not reliably remove the entry so we 
use remove(K,V) here
+                if (getEntryMap().remove(re.getKey(), re)) {
+                  if (OffHeapClearRequired.doesClearNeedToCheckForOffHeap()) {
+                    GatewaySenderEventImpl.release(re.getValue()); // OFFHEAP 
_getValue ok
+                  }
+                  // If this is an overflow only region, we need to free the 
entry on
+                  // disk at this point.
+                  try {
+                    re.removePhase1(lr, true);
+                  } catch (RegionClearedException e) {
+                    // do nothing, it's already cleared.
+                  }
+                  re.removePhase2();
+                  lruEntryDestroy(re);
+                  if (tombstone) {
+                    _getOwner().incTombstoneCount(-1);
+                    tombstones += 1;
+                  } else {
+                    delta += 1;
+                  }
                 }
+              } else { // rvv does not contain this entry so it is retained
+                result.add(id);
               }
-            } else { // rvv does not contain this entry so it is retained
-              result.add(id);
             }
           }
-        }
-        _getOwner().updateSizeOnClearRegion(delta);
-        incEntryCount(-delta);
-        incEntryCount(-tombstones);
-        if (logger.isDebugEnabled()) {
-          logger.debug("Size after clearing = {}", getEntryMap().size());
-        }
-        if (isTraceEnabled && getEntryMap().size() < 20) {
-          _getOwner().dumpBackingMap();
+          _getOwner().updateSizeOnClearRegion(delta);
+          incEntryCount(-delta);
+          incEntryCount(-tombstones);
+          if (logger.isDebugEnabled()) {
+            logger.debug("Size after clearing = {}", getEntryMap().size());
+          }
+          if (isTraceEnabled && getEntryMap().size() < 20) {
+            _getOwner().dumpBackingMap();
+          }
         }
       }
+    } finally {
+      // if (cacheIsOpen) {

Review comment:
       I think you can get rid of the comment. We should always call endClear

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
##########
@@ -388,90 +375,99 @@ public void close(BucketRegion bucketRegion) {
     if (logger.isDebugEnabled()) {
       logger.debug("Clearing entries for {} rvv={}", _getOwner(), rvv);
     }
-    LocalRegion lr = _getOwner();
+    final LocalRegion lr = _getOwner();
     RegionVersionVector localRvv = lr.getVersionVector();
-    incClearCount(lr);
-    // lock for size calcs if the region might have tombstones
-    Object lockObj = lr.getConcurrencyChecksEnabled() ? lr.getSizeGuard() : 
new Object();
-    synchronized (lockObj) {
-      if (rvv == null) {
-        int delta = 0;
-        try {
-          delta = sizeInVM(); // TODO soplog need to determine if stats should
-                              // reflect only size in memory or the complete 
thing
-        } catch (GemFireIOException e) {
-          // ignore rather than throwing an exception during cache close
-        }
-        int tombstones = lr.getTombstoneCount();
-        _mapClear();
-        _getOwner().updateSizeOnClearRegion(delta - tombstones);
-        _getOwner().incTombstoneCount(-tombstones);
-        if (delta != 0) {
-          incEntryCount(-delta);
-        }
-      } else {
-        int delta = 0;
-        int tombstones = 0;
-        VersionSource myId = _getOwner().getVersionMember();
-        if (localRvv != rvv) {
-          localRvv.recordGCVersions(rvv);
-        }
-        final boolean isTraceEnabled = logger.isTraceEnabled();
-        for (RegionEntry re : regionEntries()) {
-          synchronized (re) {
-            Token value = re.getValueAsToken();
-            // if it's already being removed or the entry is being created we 
leave it alone
-            if (value == Token.REMOVED_PHASE1 || value == 
Token.REMOVED_PHASE2) {
-              continue;
-            }
 
-            VersionSource id = re.getVersionStamp().getMemberID();
-            if (id == null) {
-              id = myId;
-            }
-            if (rvv.contains(id, re.getVersionStamp().getRegionVersion())) {
-              if (isTraceEnabled) {
-                logger.trace("region clear op is removing {} {}", re.getKey(),
-                    re.getVersionStamp());
+    long startTime;
+    startTime = _getOwner().startClear();
+
+    try {
+      // lock for size calcs if the region might have tombstones
+      Object lockObj = lr.getConcurrencyChecksEnabled() ? lr.getSizeGuard() : 
new Object();
+      synchronized (lockObj) {
+        if (rvv == null) {
+          int delta = 0;
+          try {
+            delta = sizeInVM(); // TODO soplog need to determine if stats 
should
+            // reflect only size in memory or the complete thing
+          } catch (GemFireIOException e) {
+            // ignore rather than throwing an exception during cache close
+          }
+          int tombstones = lr.getTombstoneCount();
+          _mapClear();
+          _getOwner().updateSizeOnClearRegion(delta - tombstones);
+          _getOwner().incTombstoneCount(-tombstones);
+          if (delta != 0) {
+            incEntryCount(-delta);
+          }
+        } else {
+          int delta = 0;
+          int tombstones = 0;
+          VersionSource myId = _getOwner().getVersionMember();
+          if (localRvv != rvv) {
+            localRvv.recordGCVersions(rvv);
+          }
+          final boolean isTraceEnabled = logger.isTraceEnabled();
+          for (RegionEntry re : regionEntries()) {
+            synchronized (re) {
+              Token value = re.getValueAsToken();
+              // if it's already being removed or the entry is being created 
we leave it alone
+              if (value == Token.REMOVED_PHASE1 || value == 
Token.REMOVED_PHASE2) {
+                continue;
               }
 
-              boolean tombstone = re.isTombstone();
-              // note: it.remove() did not reliably remove the entry so we use 
remove(K,V) here
-              if (getEntryMap().remove(re.getKey(), re)) {
-                if (OffHeapClearRequired.doesClearNeedToCheckForOffHeap()) {
-                  GatewaySenderEventImpl.release(re.getValue()); // OFFHEAP 
_getValue ok
-                }
-                // If this is an overflow only region, we need to free the 
entry on
-                // disk at this point.
-                try {
-                  re.removePhase1(lr, true);
-                } catch (RegionClearedException e) {
-                  // do nothing, it's already cleared.
+              VersionSource id = re.getVersionStamp().getMemberID();
+              if (id == null) {
+                id = myId;
+              }
+              if (rvv.contains(id, re.getVersionStamp().getRegionVersion())) {
+                if (isTraceEnabled) {
+                  logger.trace("region clear op is removing {} {}", 
re.getKey(),
+                      re.getVersionStamp());
                 }
-                re.removePhase2();
-                lruEntryDestroy(re);
-                if (tombstone) {
-                  _getOwner().incTombstoneCount(-1);
-                  tombstones += 1;
-                } else {
-                  delta += 1;
+
+                boolean tombstone = re.isTombstone();
+                // note: it.remove() did not reliably remove the entry so we 
use remove(K,V) here
+                if (getEntryMap().remove(re.getKey(), re)) {
+                  if (OffHeapClearRequired.doesClearNeedToCheckForOffHeap()) {
+                    GatewaySenderEventImpl.release(re.getValue()); // OFFHEAP 
_getValue ok
+                  }
+                  // If this is an overflow only region, we need to free the 
entry on
+                  // disk at this point.
+                  try {
+                    re.removePhase1(lr, true);
+                  } catch (RegionClearedException e) {
+                    // do nothing, it's already cleared.
+                  }
+                  re.removePhase2();
+                  lruEntryDestroy(re);
+                  if (tombstone) {
+                    _getOwner().incTombstoneCount(-1);
+                    tombstones += 1;
+                  } else {
+                    delta += 1;
+                  }
                 }
+              } else { // rvv does not contain this entry so it is retained
+                result.add(id);
               }
-            } else { // rvv does not contain this entry so it is retained
-              result.add(id);
             }
           }
-        }
-        _getOwner().updateSizeOnClearRegion(delta);
-        incEntryCount(-delta);
-        incEntryCount(-tombstones);
-        if (logger.isDebugEnabled()) {
-          logger.debug("Size after clearing = {}", getEntryMap().size());
-        }
-        if (isTraceEnabled && getEntryMap().size() < 20) {
-          _getOwner().dumpBackingMap();
+          _getOwner().updateSizeOnClearRegion(delta);
+          incEntryCount(-delta);
+          incEntryCount(-tombstones);
+          if (logger.isDebugEnabled()) {
+            logger.debug("Size after clearing = {}", getEntryMap().size());
+          }
+          if (isTraceEnabled && getEntryMap().size() < 20) {
+            _getOwner().dumpBackingMap();
+          }
         }
       }
+    } finally {
+      // if (cacheIsOpen) {
+      _getOwner().endClear(startTime);

Review comment:
       change _getOwner() to lr

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
##########
@@ -574,7 +574,7 @@ public boolean remove(Object key, Object value, Object 
callbackArg) {
   public CachePerfStats getRegionCachePerfStats() {
     if (dataStore != null && dataStore.getAllLocalBucketRegions().size() > 0) {
       BucketRegion bucket = 
dataStore.getAllLocalBucketRegions().iterator().next();
-      return bucket.getCachePerfStats();
+      return bucket.getPartitionedRegion().getCachePerfStats();

Review comment:
       I don't understand this method's purpose. It looks like it is new on the 
clear branch. I think you should probably remove it. The only uses I see of it 
now is in the PR's tests. But with your recent change to it you do a bunch of 
work to find a bucket and then you ask that bucket for the PartitionedRegion 
and then its stats. But this method is on the PartitionedRegion itself. So we could have 
just called getCachePerfStats. But the old impl that also did it on the first 
bucket also seems very suspect.

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionStats.java
##########
@@ -176,8 +176,18 @@
   private static final int prMetaDataSentCountId;
 
   private static final int localMaxMemoryId;
+  static final int bucketClearsId;
+  static final int bucketClearTimeId;
+  static final int bucketClearsInProgressId;
 
   static {
+    final String bucketClearsDesc =
+        "The total number of times a clear has been done on this region and 
it's bucket regions";
+    final String bucketClearTimeDesc =
+        "The total amount of time spent doing a bucket clear onx bucket 
regions";

Review comment:
       note the "onx" typo. Better would be: "The total amount of time, in 
nanoseconds, spent clearing buckets on this partitioned region."

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
##########
@@ -2538,4 +2531,9 @@ protected void basicClear(RegionEventImpl regionEvent) {
     basicClear(regionEvent, false);
   }
 
+  @Override
+  public void clear() {

Review comment:
       Right. The code should never call "clear" on the internal BucketRegion. 
If it happens it indicates a coding error in geode. IllegalStateException might 
be better than UnsupportedOperationException. Or we could keep unsupported but 
improve the message string so that if it does happen users don't think they did 
an unsupported operation on the region.

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionStats.java
##########
@@ -1188,11 +1210,51 @@ public void endPutLocal(long start) {
     stats.incLong(putLocalTimeId, delta);
   }
 
+  public long startBucketClear() {
+    incBucketClearsInProgress();
+    return clock.getTime();
+  }
+
+  public void endBucketClear(long start) {
+    long delta = clock.getTime() - start;
+    stats.incLong(bucketClearsId, 1);
+    decBucketClearsInProgress();
+    stats.incLong(bucketClearTimeId, delta);
+  }
+
   public void incPRMetaDataSentCount() {
     this.stats.incLong(prMetaDataSentCountId, 1);
   }
 
   public long getPRMetaDataSentCount() {
     return this.stats.getLong(prMetaDataSentCountId);
   }
+
+  public long getBucketClearCount() {
+    return stats.getLong(bucketClearsId);
+  }
+
+  public void incBucketClearCount() {
+    stats.incLong(bucketClearsId, 1L);
+  }
+
+  public void incBucketClearTime(Long nanoseconds) {
+    stats.incLong(bucketClearTimeId, nanoseconds);
+  }
+
+  public long getBucketClearTime() {
+    return stats.getLong(bucketClearTimeId);
+  }
+
+  public long getBucketClearsInProgress() {
+    return stats.getLong(bucketClearsInProgressId);
+  }
+
+  public void incBucketClearsInProgress() {

Review comment:
       now that you have startBucketClear and endBucketClear, these methods 
(inc, dec) should be inlined into start and end, or they should be made 
private. We want to force external classes to call start and end.

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
##########
@@ -476,12 +472,9 @@
             f.createIntCounter("retries",
                 "Number of times a concurrent destroy followed by a create has 
caused an entry operation to need to retry.",
                 "operations"),
-            f.createLongCounter("regionClears", regionClearsDesc, 
"operations"),
-            f.createLongCounter("bucketClears", bucketClearsDesc, 
"operations"),
-            f.createLongCounter("partitionedRegionClearLocalDuration",
-                partitionedRegionClearLocalDurationDesc, "nanoseconds"),
-            f.createLongCounter("partitionedRegionClearTotalDuration",
-                partitionedRegionClearTotalDurationDesc, "nanoseconds"),
+            f.createLongCounter("clears", clearsDesc, "operations"),
+            f.createLongGauge("clearsInProgress", clearsInProgressDesc, 
"operations"),
+            f.createLongCounter("clearTime", clearTimeDesc, "operations"),

Review comment:
       the units on this should be "nanoseconds" instead of "operations"

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
##########
@@ -388,90 +375,99 @@ public void close(BucketRegion bucketRegion) {
     if (logger.isDebugEnabled()) {
       logger.debug("Clearing entries for {} rvv={}", _getOwner(), rvv);
     }
-    LocalRegion lr = _getOwner();
+    final LocalRegion lr = _getOwner();
     RegionVersionVector localRvv = lr.getVersionVector();
-    incClearCount(lr);
-    // lock for size calcs if the region might have tombstones
-    Object lockObj = lr.getConcurrencyChecksEnabled() ? lr.getSizeGuard() : 
new Object();
-    synchronized (lockObj) {
-      if (rvv == null) {
-        int delta = 0;
-        try {
-          delta = sizeInVM(); // TODO soplog need to determine if stats should
-                              // reflect only size in memory or the complete 
thing
-        } catch (GemFireIOException e) {
-          // ignore rather than throwing an exception during cache close
-        }
-        int tombstones = lr.getTombstoneCount();
-        _mapClear();
-        _getOwner().updateSizeOnClearRegion(delta - tombstones);
-        _getOwner().incTombstoneCount(-tombstones);
-        if (delta != 0) {
-          incEntryCount(-delta);
-        }
-      } else {
-        int delta = 0;
-        int tombstones = 0;
-        VersionSource myId = _getOwner().getVersionMember();
-        if (localRvv != rvv) {
-          localRvv.recordGCVersions(rvv);
-        }
-        final boolean isTraceEnabled = logger.isTraceEnabled();
-        for (RegionEntry re : regionEntries()) {
-          synchronized (re) {
-            Token value = re.getValueAsToken();
-            // if it's already being removed or the entry is being created we 
leave it alone
-            if (value == Token.REMOVED_PHASE1 || value == 
Token.REMOVED_PHASE2) {
-              continue;
-            }
 
-            VersionSource id = re.getVersionStamp().getMemberID();
-            if (id == null) {
-              id = myId;
-            }
-            if (rvv.contains(id, re.getVersionStamp().getRegionVersion())) {
-              if (isTraceEnabled) {
-                logger.trace("region clear op is removing {} {}", re.getKey(),
-                    re.getVersionStamp());
+    long startTime;
+    startTime = _getOwner().startClear();

Review comment:
       make this: "final long startTime = lr.startClear();"

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
##########
@@ -2538,4 +2531,20 @@ protected void basicClear(RegionEventImpl regionEvent) {
     basicClear(regionEvent, false);
   }
 
+  @Override
+  public long startClear() {
+    long time = getPartitionedRegion().getPrStats().startBucketClear();

Review comment:
       these two lines could be simplified to "return 
getPartitionedRegion().getPrStats().startBucketClear();"

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionClear.java
##########
@@ -203,6 +201,7 @@ protected void 
waitForPrimary(PartitionedRegion.RetryTimeKeeper retryTimer) {
         try {
           boolean retry;
           do {
+            int retryTime = 2 * 60 * 1000;

Review comment:
       I liked the old code better but it should have been static and I'd like 
retryTime to be RETRY_TIME. Also consider using TimeUnit instead of these magic 
numbers: TimeUnit.MINUTES.toMillis(2). With a static import of MINUTES you could 
just pass this in with no var name: "RetryTimeKeeper(MINUTES.toMillis(2))"

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
##########
@@ -1413,36 +1404,36 @@ public void endJob() {
     };
   }
 
-  public long getRegionClearCount() {
-    return stats.getLong(regionClearsId);
-  }
-
-  public long getBucketClearCount() {
-    return stats.getLong(bucketClearsId);
-  }
-
-  public long getPartitionedRegionClearLocalDuration() {
-    return stats.getLong(partitionedRegionClearLocalDurationId);
+  public long getClearCount() {
+    return stats.getLong(clearsId);
   }
 
-  public long getPartitionedRegionClearTotalDuration() {
-    return stats.getLong(partitionedRegionClearTotalDurationId);
-  }
-
-  public void incRegionClearCount() {
-    stats.incLong(regionClearsId, 1L);
+  public long startClear() {
+    long clearStartTime = 0L;
+    if (clock.isEnabled()) {

Review comment:
       No need in this method to check isEnabled. A disabled clock does the 
right thing when getTime is called (returns 0). So get rid of the local 
variable and the if and change the return statement to "return getTime();".
   We use "isEnabled" checks in the end methods to avoid the extra arithmetic 
and a call to increment a stat by 0. 

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
##########
@@ -8631,7 +8635,9 @@ public void clearRegionLocally(RegionEventImpl 
regionEvent, boolean cacheWrite,
   @Override
   void basicLocalClear(RegionEventImpl rEvent) {
     getDataView().checkSupportsRegionClear();
+    long startTime = startClear();

Review comment:
       consider adding final

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
##########
@@ -10140,12 +10143,24 @@ protected PartitionedRegionClear 
getPartitionedRegionClear() {
     return partitionedRegionClear;
   }
 
+  @Override
+  public void endClear(long startTime) {
+    getCachePerfStats().endClear(startTime);
+  }
+
+  @Override
+  public long startClear() {
+    return getCachePerfStats().startClear();
+  }
+
   @Override
   void cmnClearRegion(RegionEventImpl regionEvent, boolean cacheWrite, boolean 
useRVV) {
     // Synchronized to avoid other threads invoking clear on this vm/node.
+    long startTime = startClear();

Review comment:
       consider adding final

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionStats.java
##########
@@ -425,8 +435,16 @@
                 false),
 
             f.createLongGauge("localMaxMemory",
-                "local max memory in bytes for this region on this member", 
"bytes")
-
+                "local max memory in bytes for this region on this member", 
"bytes"),
+            f.createLongCounter("clearLocalDuration",

Review comment:
       I think clearLocalDuration and clearTotalDuration should no longer 
exist. If I'm right then you want to delete these two lines

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionStats.java
##########
@@ -1188,11 +1210,51 @@ public void endPutLocal(long start) {
     stats.incLong(putLocalTimeId, delta);
   }
 
+  public long startBucketClear() {
+    incBucketClearsInProgress();
+    return clock.getTime();
+  }
+
+  public void endBucketClear(long start) {
+    long delta = clock.getTime() - start;

Review comment:
       check "if (clock.isEnabled())" and only calculate delta and increment 
bucketClearTimeId if it is

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionStats.java
##########
@@ -176,8 +176,18 @@
   private static final int prMetaDataSentCountId;
 
   private static final int localMaxMemoryId;
+  static final int bucketClearsId;
+  static final int bucketClearTimeId;
+  static final int bucketClearsInProgressId;
 
   static {
+    final String bucketClearsDesc =
+        "The total number of times a clear has been done on this region and 
it's bucket regions";
+    final String bucketClearTimeDesc =
+        "The total amount of time spent doing a bucket clear onx bucket 
regions";
+    final String bucketClearsInProgressDesc =
+        "The total number bucket region clears in progress";

Review comment:
       Counters are "totals". Gauges are "current". So: "The current number of 
bucket clears in progress on this partitioned region."
   We want to avoid the term "bucket region" in descriptions. We have a 
partitioned region that has buckets.

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionStats.java
##########
@@ -176,8 +176,18 @@
   private static final int prMetaDataSentCountId;
 
   private static final int localMaxMemoryId;
+  static final int bucketClearsId;
+  static final int bucketClearTimeId;
+  static final int bucketClearsInProgressId;
 
   static {
+    final String bucketClearsDesc =
+        "The total number of times a clear has been done on this region and 
it's bucket regions";

Review comment:
       I think this is better: "The total number of times a bucket of this 
partitioned region has been cleared."

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionStats.java
##########
@@ -1188,11 +1210,51 @@ public void endPutLocal(long start) {
     stats.incLong(putLocalTimeId, delta);
   }
 
+  public long startBucketClear() {
+    stats.incLong(bucketClearsInProgressId, 1);
+    return clock.getTime();
+  }
+
+  public void endBucketClear(long start) {
+    long delta = clock.getTime() - start;
+    stats.incLong(bucketClearsId, 1);
+    stats.incLong(bucketClearsInProgressId, -1);
+    stats.incLong(bucketClearTimeId, delta);
+  }
+
   public void incPRMetaDataSentCount() {
     this.stats.incLong(prMetaDataSentCountId, 1);
   }
 
   public long getPRMetaDataSentCount() {
     return this.stats.getLong(prMetaDataSentCountId);
   }
+
+  public long getBucketClearCount() {
+    return stats.getLong(bucketClearsId);
+  }
+
+  public void incBucketClearCount() {

Review comment:
       It looks like it is only used by tests. Shouldn't this method be removed 
to force callers to use startBucketClear and endBucketClear?

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/RegionPerfStats.java
##########
@@ -519,16 +519,27 @@ public void incEvictWorkTime(long delta) {
     cachePerfStats.incEvictWorkTime(delta);
   }
 
+  private void startClearLocal() {
+    stats.incLong(clearsInProgressId, 1L);
+  }
+
+  private void endClearLocal(long timeTaken) {
+    stats.incLong(clearsInProgressId, -1L);
+    stats.incLong(clearsId, 1L);
+    stats.incLong(clearTimeId, timeTaken);

Review comment:
       Add a "if (clock.isEnabled())" to the inc of clearTimeId

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionStats.java
##########
@@ -1188,11 +1210,51 @@ public void endPutLocal(long start) {
     stats.incLong(putLocalTimeId, delta);
   }
 
+  public long startBucketClear() {
+    incBucketClearsInProgress();
+    return clock.getTime();
+  }
+
+  public void endBucketClear(long start) {
+    long delta = clock.getTime() - start;
+    stats.incLong(bucketClearsId, 1);
+    decBucketClearsInProgress();
+    stats.incLong(bucketClearTimeId, delta);
+  }
+
   public void incPRMetaDataSentCount() {
     this.stats.incLong(prMetaDataSentCountId, 1);
   }
 
   public long getPRMetaDataSentCount() {
     return this.stats.getLong(prMetaDataSentCountId);
   }
+
+  public long getBucketClearCount() {
+    return stats.getLong(bucketClearsId);
+  }
+
+  public void incBucketClearCount() {
+    stats.incLong(bucketClearsId, 1L);
+  }
+
+  public void incBucketClearTime(Long nanoseconds) {

Review comment:
       with the addition of startBucketClear and endBucketClear this method 
should no longer be needed.

##########
File path: 
geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
##########
@@ -8437,8 +8437,12 @@ public void basicBridgeClear(Object callbackArg, final 
ClientProxyMembershipID c
 
     RegionEventImpl event = new ClientRegionEventImpl(this, 
Operation.REGION_CLEAR, callbackArg,
         false, client.getDistributedMember(), client, eventId);
-
-    basicClear(event, true);
+    long startTime = startClear();

Review comment:
       consider adding a final




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Reply via email to