This is an automated email from the ASF dual-hosted git repository.
tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git
The following commit(s) were added to refs/heads/master by this push:
new 7d6d8e4 PHOENIX-5059 Use the Datasource v2 api in the spark connector
(addendum)
7d6d8e4 is described below
commit 7d6d8e4cccc78b54448b88ede3f80ae245f627cb
Author: Thomas D'Silva <[email protected]>
AuthorDate: Wed Jan 9 17:08:19 2019 -0800
PHOENIX-5059 Use the Datasource v2 api in the spark connector (addendum)
---
.../phoenix/spark/datasource/v2/reader/PhoenixDataSourceReader.java | 2 +-
.../spark/datasource/v2/reader/PhoenixInputPartitionReader.java | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixDataSourceReader.java
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixDataSourceReader.java
index 446d96f..c76d9c8 100644
---
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixDataSourceReader.java
+++
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixDataSourceReader.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.RegionSizeCalculator;
+import org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.iterate.MapReduceParallelScanGrouper;
import org.apache.phoenix.jdbc.PhoenixConnection;
diff --git
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixInputPartitionReader.java
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixInputPartitionReader.java
index 30e84db..664a887 100644
---
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixInputPartitionReader.java
+++
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/reader/PhoenixInputPartitionReader.java
@@ -25,6 +25,7 @@ import java.sql.Statement;
import java.util.List;
import java.util.Properties;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compile.QueryPlan;
@@ -108,7 +109,7 @@ public class PhoenixInputPartitionReader implements InputPartitionReader<InternalRow>
// Clear the table region boundary cache to make sure long running jobs stay up to date
byte[] tableNameBytes =
queryPlan.getTableRef().getTable().getPhysicalName().getBytes();
ConnectionQueryServices services =
queryPlan.getContext().getConnection().getQueryServices();
- services.clearTableRegionCache(tableNameBytes);
+ services.clearTableRegionCache(TableName.valueOf(tableNameBytes));
long renewScannerLeaseThreshold =
queryPlan.getContext().getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds();
for (Scan scan : scans) {