This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/master by this push:
new 0b2b474 HBASE-21908 Remove Scan.getScanMetrics
0b2b474 is described below
commit 0b2b474b4c799b6005559fa4ee873d0ec7e08964
Author: zhangduo <[email protected]>
AuthorDate: Sat Feb 16 17:39:10 2019 +0800
HBASE-21908 Remove Scan.getScanMetrics
Signed-off-by: Michael Stack <[email protected]>
---
.../apache/hadoop/hbase/client/ClientScanner.java | 43 +++-------------------
.../java/org/apache/hadoop/hbase/client/Scan.java | 36 ++----------------
.../hadoop/hbase/ScanPerformanceEvaluation.java | 2 +-
.../hadoop/hbase/client/TestFromClientSide.java | 21 +++--------
4 files changed, 17 insertions(+), 85 deletions(-)
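
A note on usage after this change (not part of the patch itself): scan metrics are still enabled on the Scan, but are read back from the ResultScanner, as the updated callers below do. A minimal sketch under those assumptions; the class, method and table names here are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

    // Hypothetical helper, not part of this patch: scans a table and returns its metrics.
    public final class ScanMetricsExample {
      public static ScanMetrics scanAndGetMetrics(Connection connection, TableName tableName)
          throws IOException {
        Scan scan = new Scan();
        scan.setScanMetricsEnabled(true);
        try (Table table = connection.getTable(tableName);
            ResultScanner scanner = table.getScanner(scan)) {
          for (Result r : scanner) {
            // consume results; metrics accumulate per RPC round trip
          }
          // Metrics are read from the scanner now, not from the Scan object.
          return scanner.getScanMetrics();
        }
      }
    }
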
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 3dbe5f4..cae98aa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -21,33 +21,30 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.calcEstimatedSize;
import static org.apache.hadoop.hbase.client.ConnectionUtils.createScanResultCache;
import static org.apache.hadoop.hbase.client.ConnectionUtils.incRegionCountMetrics;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.ExecutorService;
-
import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-import org.apache.hadoop.hbase.exceptions.ScannerResetException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
+import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
+import org.apache.hadoop.hbase.exceptions.ScannerResetException;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
/**
* Implements the scanner interface for the HBase client. If there are multiple regions in a table,
@@ -74,7 +71,6 @@ public abstract class ClientScanner extends AbstractClientScanner {
private final ClusterConnection connection;
protected final TableName tableName;
protected final int scannerTimeout;
- protected boolean scanMetricsPublished = false;
protected RpcRetryingCaller<Result[]> caller;
protected RpcControllerFactory rpcControllerFactory;
protected Configuration conf;
@@ -270,27 +266,6 @@ public abstract class ClientScanner extends AbstractClientScanner {
return rrs;
}
- /**
- * Publish the scan metrics. For now, we use scan.setAttribute to pass the metrics back to the
- * application or TableInputFormat.Later, we could push it to other systems. We don't use metrics
- * framework because it doesn't support multi-instances of the same metrics on the same machine;
- * for scan/map reduce scenarios, we will have multiple scans running at the same time. By
- * default, scan metrics are disabled; if the application wants to collect them, this behavior can
- * be turned on by calling calling {@link Scan#setScanMetricsEnabled(boolean)}
- */
- protected void writeScanMetrics() {
- if (this.scanMetrics == null || scanMetricsPublished) {
- return;
- }
- // Publish ScanMetrics to the Scan Object.
- // As we have claimed in the comment of Scan.getScanMetrics, this relies on that user will not
- // call ResultScanner.getScanMetrics and reset the ScanMetrics. Otherwise the metrics published
- // to Scan will be messed up.
- scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA,
- ProtobufUtil.toScanMetrics(scanMetrics, false).toByteArray());
- scanMetricsPublished = true;
- }
-
protected void initSyncCache() {
cache = new ArrayDeque<>();
}
@@ -310,11 +285,6 @@ public abstract class ClientScanner extends AbstractClientScanner {
// try again to load from cache
result = cache.poll();
-
- // if we exhausted this scanner before calling close, write out the scan metrics
- if (result == null) {
- writeScanMetrics();
- }
return result;
}
@@ -548,7 +518,6 @@ public abstract class ClientScanner extends AbstractClientScanner {
@Override
public void close() {
- if (!scanMetricsPublished) writeScanMetrics();
if (callable != null) {
callable.setClose();
try {
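
For reference, the publishing path deleted above round-tripped ScanMetrics through a Scan attribute as serialized protobuf. Condensing the removed lines (the publish side deleted above, the read side deleted in Scan.java below, identifiers exactly as in the old code), the flow was roughly:

    // Publish side, formerly ClientScanner.writeScanMetrics():
    scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA,
        ProtobufUtil.toScanMetrics(scanMetrics, false).toByteArray());

    // Read side, formerly Scan.getScanMetrics():
    byte[] bytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
    ScanMetrics metrics = bytes == null ? null : ProtobufUtil.toScanMetrics(bytes);

Both halves are gone, which is also why the ProtobufUtil import can be dropped from both classes.
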
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index d4aff04..23bb5ce 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -28,19 +28,17 @@ import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;
-
import org.apache.hadoop.hbase.HConstants;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Used to perform Scan operations.
@@ -114,19 +112,7 @@ public class Scan extends Query {
private int storeLimit = -1;
private int storeOffset = 0;
- /**
- * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
- */
- // Make private or remove.
- @Deprecated
- static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
-
- /**
- * Use {@link #getScanMetrics()}
- */
- // Make this private or remove.
- @Deprecated
- static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";
+ private static final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";
// If an application wants to use multiple scans over different tables each scan must
// define this attribute with the appropriate table name by calling
@@ -1123,20 +1109,6 @@ public class Scan extends Query {
return attr == null ? false : Bytes.toBoolean(attr);
}
- /**
- * @return Metrics on this Scan, if metrics were enabled.
- * @see #setScanMetricsEnabled(boolean)
- * @deprecated Use {@link ResultScanner#getScanMetrics()} instead. And notice that, please do not
- * use this method and {@link ResultScanner#getScanMetrics()} together, the metrics
- * will be messed up.
- */
- @Deprecated
- public ScanMetrics getScanMetrics() {
- byte[] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
- if (bytes == null) return null;
- return ProtobufUtil.toScanMetrics(bytes);
- }
-
public Boolean isAsyncPrefetch() {
return asyncPrefetch;
}
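
With the metrics-data attribute removed and SCAN_ATTRIBUTES_METRICS_ENABLE reduced to a private constant, collection can only be toggled through the existing accessors; a short sketch:

    Scan scan = new Scan();
    scan.setScanMetricsEnabled(true);      // stores the now-private enable attribute internally
    boolean enabled = scan.isScanMetricsEnabled();
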
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
index c181b19..8b70687 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java
@@ -175,7 +175,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool {
table.close();
connection.close();
- ScanMetrics metrics = scan.getScanMetrics();
+ ScanMetrics metrics = scanner.getScanMetrics();
long totalBytes = metrics.countOfBytesInResults.get();
double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS);
double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index c74c3c6..0b8e2ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -5115,7 +5115,7 @@ public class TestFromClientSide {
LOG.info("test data has " + numRecords + " records.");
// by default, scan metrics collection is turned off
- assertEquals(null, scan1.getScanMetrics());
+ assertEquals(null, scanner.getScanMetrics());
// turn on scan metrics
Scan scan2 = new Scan();
@@ -5126,7 +5126,7 @@ public class TestFromClientSide {
}
scanner.close();
// closing the scanner will set the metrics.
- assertNotNull(scan2.getScanMetrics());
+ assertNotNull(scanner.getScanMetrics());
// set caching to 1, because metrics are collected in each roundtrip only
scan2 = new Scan();
@@ -5139,7 +5139,7 @@ public class TestFromClientSide {
}
scanner.close();
- ScanMetrics scanMetrics = scan2.getScanMetrics();
+ ScanMetrics scanMetrics = scanner.getScanMetrics();
assertEquals("Did not access all the regions in the table", numOfRegions,
scanMetrics.countOfRegions.get());
@@ -5155,7 +5155,7 @@ public class TestFromClientSide {
}
}
scanner.close();
- scanMetrics = scan2.getScanMetrics();
+ scanMetrics = scanner.getScanMetrics();
assertEquals("Did not count the result bytes", numBytes,
scanMetrics.countOfBytesInResults.get());
@@ -5172,7 +5172,7 @@ public class TestFromClientSide {
}
}
scanner.close();
- scanMetrics = scan2.getScanMetrics();
+ scanMetrics = scanner.getScanMetrics();
assertEquals("Did not count the result bytes", numBytes,
scanMetrics.countOfBytesInResults.get());
@@ -5200,20 +5200,11 @@ public class TestFromClientSide {
for (Result result : scannerWithClose.next(numRecords + 1)) {
}
scannerWithClose.close();
- ScanMetrics scanMetricsWithClose = getScanMetrics(scanWithClose);
+ ScanMetrics scanMetricsWithClose = scannerWithClose.getScanMetrics();
assertEquals("Did not access all the regions in the table", numOfRegions,
scanMetricsWithClose.countOfRegions.get());
}
- private ScanMetrics getScanMetrics(Scan scan) throws Exception {
- byte[] serializedMetrics = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
- assertTrue("Serialized metrics were not found.", serializedMetrics !=
null);
-
- ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(serializedMetrics);
-
- return scanMetrics;
- }
-
/**
* Tests that cache on write works all the way up from the client-side.
*