phoenix git commit: Code clean up

2017-02-13 Thread samarth
Repository: phoenix
Updated Branches:
  refs/heads/encodecolumns2 9180ce22d -> ecc157b09


Code clean up


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ecc157b0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ecc157b0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ecc157b0

Branch: refs/heads/encodecolumns2
Commit: ecc157b09150f0cd62afd5820f3acdd3b57d9c44
Parents: 9180ce2
Author: Samarth 
Authored: Mon Feb 13 23:57:30 2017 -0800
Committer: Samarth 
Committed: Mon Feb 13 23:57:30 2017 -0800

--
 .../apache/phoenix/compile/JoinCompiler.java|  1 -
 .../coprocessor/BaseScannerRegionObserver.java  |  5 +++
 .../GroupedAggregateRegionObserver.java |  4 +-
 .../phoenix/coprocessor/ScanRegionObserver.java |  5 +--
 .../UngroupedAggregateRegionObserver.java   |  2 +-
 .../apache/phoenix/join/HashCacheFactory.java   |  1 -
 .../mapreduce/FormatToBytesWritableMapper.java  |  1 -
 .../apache/phoenix/schema/MetaDataClient.java   |  8 ++--
 .../phoenix/schema/PColumnFamilyImpl.java   | 12 +-
 .../org/apache/phoenix/schema/PTableImpl.java   | 44 +---
 .../java/org/apache/phoenix/util/IndexUtil.java |  1 -
 11 files changed, 24 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 9a2651d..eef604b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -1307,7 +1307,6 @@ public class JoinCompiler {
 if (left.getBucketNum() != null) {
 merged.remove(0);
 }
-//TODO: samarth should projected join table should always have non-encoded column names? Is this where we also decide that once we start supporting joins then have the storage scheme right.
 return PTableImpl.makePTable(left.getTenantId(), left.getSchemaName(),
 PNameFactory.newName(SchemaUtil.getTableName(left.getName().getString(), right.getName().getString())),
 left.getType(), left.getIndexState(), left.getTimeStamp(), left.getSequenceNumber(), left.getPKName(),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ecc157b0/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 1c479c5..c340216 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -47,6 +47,7 @@ import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.KeyValueSchema;
+import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
@@ -54,6 +55,7 @@ import org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple;
 import org.apache.phoenix.schema.tuple.PositionBasedResultTuple;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
@@ -114,6 +116,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
 public final static String MAX_QUALIFIER = "_MaxQualifier";
 public final static String QUALIFIER_ENCODING_SCHEME = "_QualifierEncodingScheme";
 public final static String IMMUTABLE_STORAGE_ENCODING_SCHEME = "_ImmutableStorageEncodingScheme";
+public final static String USE_ENCODED_COLUMN_QUALIFIER_LIST = "_UseEncodedColumnQualifierList";

 /**
  * Attribute name used to pass custom annotations in Scans and Mutations (later). Custom annotations
@@ -124,6 +127,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
 /** Exposed for testing */
 public static 
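For context, constants like the ones above are attribute keys serialized onto the client's Scan so the region-observer coprocessors can read the same settings server-side. A minimal sketch of that round trip using the standard HBase Scan attribute API (the helper names here are hypothetical, not Phoenix code):

import org.apache.hadoop.hbase.client.Scan;

// Hypothetical helpers showing how a constant such as QUALIFIER_ENCODING_SCHEME
// can be carried from client to coprocessor as a Scan attribute.
public class ScanAttributeSketch {
    static final String QUALIFIER_ENCODING_SCHEME = "_QualifierEncodingScheme";

    // Client side: attach the setting before the scan is submitted.
    static void setEncodingScheme(Scan scan, byte schemeId) {
        scan.setAttribute(QUALIFIER_ENCODING_SCHEME, new byte[] { schemeId });
    }

    // Server side: read it back in the observer, with a default when absent.
    static byte getEncodingScheme(Scan scan) {
        byte[] raw = scan.getAttribute(QUALIFIER_ENCODING_SCHEME);
        return raw == null ? 0 : raw[0];
    }
}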

Apache-Phoenix | EncodeColumns | Build Successful

2017-02-13 Thread Apache Jenkins Server
encodecolumns2 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/encodecolumns2

Compiled Artifacts https://builds.apache.org/job/Phoenix-encode-columns/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-encode-columns/lastCompletedBuild/testReport/

Changes
[samarth] Fix determination of qualifier range



Build times for the last couple of runs (latest build is right-most). Legend: blue = normal, red = test failure, gray = timeout.


phoenix git commit: Fix determination of qualifier range

2017-02-13 Thread samarth
Repository: phoenix
Updated Branches:
  refs/heads/encodecolumns2 a65ab0030 -> 9180ce22d


Fix determination of qualifier range


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9180ce22
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9180ce22
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9180ce22

Branch: refs/heads/encodecolumns2
Commit: 9180ce22d2782632b9c96c97c8165554efc98a95
Parents: a65ab00
Author: Samarth 
Authored: Mon Feb 13 22:07:34 2017 -0800
Committer: Samarth 
Committed: Mon Feb 13 22:07:34 2017 -0800

--
 .../UngroupedAggregateRegionObserver.java   |  2 +-
 .../apache/phoenix/util/EncodedColumnsUtil.java | 33 +---
 2 files changed, 15 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9180ce22/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index b6a0a6b..70ef609 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -337,7 +337,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
 final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
 final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
-boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan)) && scan.getAttribute(BaseScannerRegionObserver.TOPN) != null;
+boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
 if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
 if (dataColumns != null) {
 tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9180ce22/phoenix-core/src/main/java/org/apache/phoenix/util/EncodedColumnsUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/EncodedColumnsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/EncodedColumnsUtil.java
index b33b085..59e99fd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/EncodedColumnsUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/EncodedColumnsUtil.java
@@ -21,7 +21,9 @@ import static com.google.common.base.Preconditions.checkNotNull;
 import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.Map;
+import java.util.Map.Entry;
 
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -135,28 +137,21 @@ public class EncodedColumnsUtil {
 public static Map> getFamilyQualifierRanges(PTable table) {
 checkNotNull(table);
 QualifierEncodingScheme encodingScheme = table.getEncodingScheme();
-Preconditions.checkArgument(encodingScheme != NON_ENCODED_QUALIFIERS,
-"Use this method only for tables with encoding scheme "
-+ NON_ENCODED_QUALIFIERS);
-Map> toReturn = Maps.newHashMapWithExpectedSize(table.getColumns().size());
-for (PColumn column : table.getColumns()) {
-if (!SchemaUtil.isPKColumn(column)) {
-String colFamily = column.getFamilyName().getString();
-Pair minMaxQualifiers = toReturn.get(colFamily);
-Integer encodedColumnQualifier = encodingScheme.decode(column.getColumnQualifierBytes());
-if (minMaxQualifiers == null) {
-minMaxQualifiers = new Pair<>(encodedColumnQualifier, encodedColumnQualifier);
-toReturn.put(colFamily, minMaxQualifiers);
-} else {
-if (encodedColumnQualifier < minMaxQualifiers.getFirst()) {
-minMaxQualifiers.setFirst(encodedColumnQualifier);
-} else if (encodedColumnQualifier > minMaxQualifiers.getSecond()) {
- 
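The removed block above tracked, per column family, the smallest and largest encoded column qualifier across all non-PK columns. A standalone sketch of that bookkeeping using plain JDK types (the inputs here are hypothetical stand-ins for Phoenix's PTable/PColumn, not the actual API):

import java.util.HashMap;
import java.util.Map;

// Minimal sketch: track the min and max encoded column qualifier seen per column family.
public class QualifierRangeSketch {
    public static Map<String, int[]> familyQualifierRanges(Map<String, Integer> columnToQualifier,
                                                           Map<String, String> columnToFamily) {
        Map<String, int[]> ranges = new HashMap<>();
        for (Map.Entry<String, Integer> e : columnToQualifier.entrySet()) {
            String family = columnToFamily.get(e.getKey());
            int q = e.getValue();
            int[] range = ranges.get(family);
            if (range == null) {
                ranges.put(family, new int[] { q, q });   // first qualifier seen for this family
            } else {
                range[0] = Math.min(range[0], q);         // new minimum
                range[1] = Math.max(range[1], q);         // new maximum
            }
        }
        return ranges;
    }
}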

Build failed in Jenkins: Phoenix-Calcite #71

2017-02-13 Thread Apache Jenkins Server
See 

Changes:

[maryannxue] PHOENIX-3668 Resolve Date/Time/Timestamp incompatibility in bind

--
[...truncated 72735 lines...]
Caused by: org.apache.hadoop.hbase.client.RetriesExhaustedException: 
Failed after attempts=35, exceptions:
Tue Feb 14 02:05:11 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=4, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:05:11 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=6, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:05:12 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=8, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:05:12 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=10, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:05:14 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=12, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:05:16 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=14, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:05:20 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=16, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:05:30 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=18, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:05:40 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=20, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:05:50 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=22, waitTime=102, 
operationTimeout=100 expired.
Tue Feb 14 02:06:00 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=24, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:06:21 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=26, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:06:41 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=28, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:07:01 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=30, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 02:07:22 UTC 2017, RpcRetryingCaller{globalStartTime=1487037911565, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:33461 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=32, waitTime=101, 
operationTimeout=100 expired.

Apache Phoenix - Timeout crawler - Build https://builds.apache.org/job/Phoenix-calcite/71/

2017-02-13 Thread Apache Jenkins Server
[...truncated 83 lines...]
Looking at the log, list of test(s) that timed-out:

Build:
https://builds.apache.org/job/Phoenix-calcite/71/


Affected test class(es):
Set(['org.apache.phoenix.end2end.CastAndCoerceIT', 
'org.apache.phoenix.end2end.QueryIT', 
'org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT', 
'org.apache.phoenix.end2end.index.IndexIT'])


Build step 'Execute shell' marked build as failure
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any


Apache-Phoenix | Phoenix-4.8-HBase-1.2 | Build Successful

2017-02-13 Thread Apache Jenkins Server
Master branch build status Successful
Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.8-HBase-1.2

Last Successful Compiled Artifacts https://builds.apache.org/job/Phoenix-4.8-HBase-1.2/lastSuccessfulBuild/artifact/

Last Complete Test Report https://builds.apache.org/job/Phoenix-4.8-HBase-1.2/lastCompletedBuild/testReport/

Changes
No changes


Build times for the last couple of runs (latest build is right-most). Legend: blue = normal, red = test failure, gray = timeout.


Apache-Phoenix | EncodeColumns | Build Successful

2017-02-13 Thread Apache Jenkins Server
encodecolumns2 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/encodecolumns2

Compiled Artifacts https://builds.apache.org/job/Phoenix-encode-columns/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-encode-columns/lastCompletedBuild/testReport/

Changes
[samarth] PHOENIX-3666 Make use of EncodedColumnQualifierCellsList for all column



Build times for the last couple of runs (latest build is right-most). Legend: blue = normal, red = test failure, gray = timeout.


Apache-Phoenix | 4.x-HBase-0.98 | Build Successful

2017-02-13 Thread Apache Jenkins Server
4.x-HBase-0.98 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.x-HBase-0.98

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.x-HBase-0.98/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.x-HBase-0.98/lastCompletedBuild/testReport/

Changes
[tdsilva] PHOENIX-3660 Don't pass statement properties while adding columns to a

[apurtell] PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan



Build times for the last couple of runs (latest build is right-most). Legend: blue = normal, red = test failure, gray = timeout.


phoenix git commit: PHOENIX-3668 Resolve Date/Time/Timestamp incompatibility in bind variables

2017-02-13 Thread maryannxue
Repository: phoenix
Updated Branches:
  refs/heads/calcite 553dc9727 -> 84d92bffe


PHOENIX-3668 Resolve Date/Time/Timestamp incompatibility in bind variables


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/84d92bff
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/84d92bff
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/84d92bff

Branch: refs/heads/calcite
Commit: 84d92bffecc1e1cf20c56f2691b7d6ecae7bb7ab
Parents: 553dc97
Author: maryannxue 
Authored: Mon Feb 13 16:50:08 2017 -0800
Committer: maryannxue 
Committed: Mon Feb 13 16:50:08 2017 -0800

--
 .../calcite/jdbc/PhoenixCalciteFactory.java | 31 
 .../apache/phoenix/calcite/CalciteUtils.java|  4 +--
 .../jdbc/PhoenixCalciteEmbeddedDriver.java  |  2 +-
 3 files changed, 34 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/84d92bff/phoenix-core/src/main/java/org/apache/calcite/jdbc/PhoenixCalciteFactory.java
--
diff --git a/phoenix-core/src/main/java/org/apache/calcite/jdbc/PhoenixCalciteFactory.java b/phoenix-core/src/main/java/org/apache/calcite/jdbc/PhoenixCalciteFactory.java
index 5c15025..15d8b83 100644
--- a/phoenix-core/src/main/java/org/apache/calcite/jdbc/PhoenixCalciteFactory.java
+++ b/phoenix-core/src/main/java/org/apache/calcite/jdbc/PhoenixCalciteFactory.java
@@ -4,6 +4,7 @@ import java.io.File;
 import java.io.InputStream;
 import java.io.Reader;
 import java.sql.DatabaseMetaData;
+import java.sql.Date;
 import java.sql.NClob;
 import java.sql.ResultSetMetaData;
 import java.sql.RowId;
@@ -11,6 +12,8 @@ import java.sql.SQLException;
 import java.sql.SQLFeatureNotSupportedException;
 import java.sql.SQLXML;
 import java.sql.Savepoint;
+import java.sql.Time;
+import java.sql.Timestamp;
 import java.sql.ResultSet;
 import java.util.Calendar;
 import java.util.List;
@@ -381,6 +384,34 @@ public class PhoenixCalciteFactory extends CalciteFactory {
 }
 }
 
+public void setTimestamp(int parameterIndex, Timestamp x, Calendar calendar)
+throws SQLException {
+if (x != null) {
+x = new Timestamp(getAdjustedTime(x.getTime(), calendar));
+}
+super.setTimestamp(parameterIndex, x, calendar);
+}
+
+public void setDate(int parameterIndex, Date x, Calendar calendar)
+throws SQLException {
+if (x != null) {
+x = new Date(getAdjustedTime(x.getTime(), calendar));
+}
+super.setDate(parameterIndex, x, calendar);
+}
+
+public void setTime(int parameterIndex, Time x, Calendar calendar)
+throws SQLException {
+if (x != null) {
+x = new Time(getAdjustedTime(x.getTime(), calendar));
+}
+super.setTime(parameterIndex, x, calendar);
+}
+
+private long getAdjustedTime(long v, Calendar calendar) {
+return (v - calendar.getTimeZone().getOffset(v));
+}
+
 public void setRowId(
 int parameterIndex,
 RowId x) throws SQLException {
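The three setters above all funnel through the same arithmetic: shift the epoch millis by the offset of the Calendar's time zone before handing the value to the underlying statement. A tiny standalone illustration of that helper (hypothetical class, values are examples only):

import java.sql.Timestamp;
import java.util.Calendar;
import java.util.TimeZone;

// Standalone illustration of the getAdjustedTime() arithmetic from the patch:
// shift epoch millis by the Calendar's zone offset at that instant.
public class AdjustedTimeExample {
    static long adjusted(long epochMillis, Calendar calendar) {
        return epochMillis - calendar.getTimeZone().getOffset(epochMillis);
    }

    public static void main(String[] args) {
        Calendar utcMinus8 = Calendar.getInstance(TimeZone.getTimeZone("GMT-8"));
        Timestamp original = new Timestamp(1487033408000L);                 // example instant
        Timestamp shifted = new Timestamp(adjusted(original.getTime(), utcMinus8));
        System.out.println(original + " -> " + shifted);                    // shifted forward by 8 hours here
    }
}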

http://git-wip-us.apache.org/repos/asf/phoenix/blob/84d92bff/phoenix-core/src/main/java/org/apache/phoenix/calcite/CalciteUtils.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/calcite/CalciteUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/calcite/CalciteUtils.java
index c9f072a..d1ca972 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/calcite/CalciteUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/calcite/CalciteUtils.java
@@ -295,7 +295,7 @@ public class CalciteUtils {
ExpressionFactory eFactory = EXPRESSION_MAP.get(node.getKind());
if (eFactory == null) {
 throw new UnsupportedOperationException("Unsupported RexNode: "
-   + node);
+   + node.getKind());
}
return eFactory;
}
@@ -1169,7 +1169,7 @@ public class CalciteUtils {
 
 public static SQLException unwrapSqlException(SQLException root){
 Exception e = root;
-while(e.getCause() != null){
+while(e.getCause() instanceof Exception){
 e = (Exception) e.getCause();
 if(e instanceof RuntimeException && e.getCause() instanceof SQLException) {
 return (SQLException) e.getCause();


phoenix git commit: PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang) [Forced Update!]

2017-02-13 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 222388b03 -> 07df91700 (forced update)


PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/07df9170
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/07df9170
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/07df9170

Branch: refs/heads/4.x-HBase-1.1
Commit: 07df9170080adf715612fe8739d6011f51ae8cb6
Parents: dbb0c1e
Author: Andrew Purtell 
Authored: Mon Feb 13 15:24:01 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:51:35 2017 -0800

--
 .../apache/phoenix/mapreduce/AbstractBulkLoadTool.java  |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java   |  2 +-
 .../org/apache/phoenix/mapreduce/index/IndexTool.java   | 12 
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/07df9170/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index b32f9c6..f717647 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -331,7 +331,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
 LOG.info("Loading HFiles from {}", outputPath);
 completebulkload(conf,outputPath,tablesToBeLoaded);
 LOG.info("Removing output directory {}", outputPath);
-if(!FileSystem.get(conf).delete(outputPath, true)) {
+if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
 LOG.error("Failed to delete the output directory {}", outputPath);
 }
 return 0;
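This one-line change is the substance of PHOENIX-3661: FileSystem.get(conf) always resolves the default filesystem from fs.defaultFS, so when the output directory lives on a different filesystem the delete targets the wrong place. A small sketch of the corrected lookup (hypothetical wrapper method, standard Hadoop API):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: resolve the FileSystem from the Path itself (file://, hdfs://, etc.)
// instead of FileSystem.get(conf), which always returns the default filesystem.
public class OutputCleanup {
    static boolean deleteOutput(Configuration conf, Path outputPath) throws IOException {
        FileSystem fs = outputPath.getFileSystem(conf); // filesystem that owns the path's scheme
        return fs.delete(outputPath, true);             // recursive delete on that filesystem
    }
}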

http://git-wip-us.apache.org/repos/asf/phoenix/blob/07df9170/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 35a2bd8..da78fd5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -455,8 +455,8 @@ public class MultiHfileOutputFormat extends FileOutputFormat

[3/4] phoenix git commit: PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)

2017-02-13 Thread apurtell
PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f48aa81a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f48aa81a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f48aa81a

Branch: refs/heads/4.x-HBase-0.98
Commit: f48aa81a02f5e8830dc821d23618f579453ab733
Parents: 234e427
Author: Andrew Purtell 
Authored: Mon Feb 13 15:24:01 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:25:37 2017 -0800

--
 .../apache/phoenix/mapreduce/AbstractBulkLoadTool.java  |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java   |  2 +-
 .../org/apache/phoenix/mapreduce/index/IndexTool.java   | 12 
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f48aa81a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index f7b7d22..9cb54ef 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -328,7 +328,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
 LOG.info("Loading HFiles from {}", outputPath);
 completebulkload(conf,outputPath,tablesToBeLoaded);
 LOG.info("Removing output directory {}", outputPath);
-if(!FileSystem.get(conf).delete(outputPath, true)) {
+if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
 LOG.error("Failed to delete the output directory {}", outputPath);
 }
 return 0;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f48aa81a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index f48a690..9c19a52 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -454,8 +454,8 @@ public class MultiHfileOutputFormat extends FileOutputFormat

[1/4] phoenix git commit: Amend PHOENIX-3611 ConnectionQueryService should expire LRU entries

2017-02-13 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 234e427b3 -> f48aa81a0
  refs/heads/4.x-HBase-1.1 dbb0c1ea0 -> 222388b03
  refs/heads/master 8f2d0fbc5 -> 7567fcd6d


Amend PHOENIX-3611 ConnectionQueryService should expire LRU entries

Signed-off-by: Andrew Purtell 

Do not enforce a maximum size on the client connection cache.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/beea861b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/beea861b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/beea861b

Branch: refs/heads/4.x-HBase-1.1
Commit: beea861be4dab1f2bcb61e7c97f1ac8de742af74
Parents: dbb0c1e
Author: gjacoby 
Authored: Wed Jan 25 13:49:26 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:22:59 2017 -0800

--
 .../src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java  | 3 ---
 .../src/main/java/org/apache/phoenix/query/QueryServices.java | 1 -
 .../main/java/org/apache/phoenix/query/QueryServicesOptions.java  | 1 -
 3 files changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/beea861b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index b2acacf..67ac9c9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -149,8 +149,6 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {

 private Cache initializeConnectionCache() {
 Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
-int maxCacheSize = config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_SIZE,
-QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE);
 int maxCacheDuration = config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS,
 QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION);
 RemovalListener cacheRemovalListener =
@@ -170,7 +168,6 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
 }
 };
 return CacheBuilder.newBuilder()
-.maximumSize(maxCacheSize)
 .expireAfterAccess(maxCacheDuration, TimeUnit.MILLISECONDS)
 .removalListener(cacheRemovalListener)
 .build();
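The commit message states the intent directly: stop bounding the connection cache by entry count and rely on time-based expiry alone. A minimal Guava sketch of the resulting shape (key/value types and the duration are illustrative, not Phoenix's actual ones):

import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;

// Sketch of an access-expiring cache with no maximumSize(), mirroring the change above.
public class ConnectionCacheSketch {
    static Cache<String, AutoCloseable> build(long maxDurationMillis) {
        RemovalListener<String, AutoCloseable> onRemoval = notification -> {
            AutoCloseable value = notification.getValue();
            if (value != null) {
                try {
                    value.close(); // release resources when an entry ages out
                } catch (Exception ignored) {
                }
            }
        };
        return CacheBuilder.newBuilder()
                .expireAfterAccess(maxDurationMillis, TimeUnit.MILLISECONDS) // no size cap, expiry only
                .removalListener(onRemoval)
                .build();
    }
}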

http://git-wip-us.apache.org/repos/asf/phoenix/blob/beea861b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 2035de8..0307e4c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -237,7 +237,6 @@ public interface QueryServices extends SQLCloseable {
 public static final String CLIENT_CACHE_ENCODING = "phoenix.table.client.cache.encoding";
 public static final String AUTO_UPGRADE_ENABLED = "phoenix.autoupgrade.enabled";

-public static final String CLIENT_CONNECTION_CACHE_MAX_SIZE = "phoenix.client.connection.cache.max.size";
 public static final String CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS =
 "phoenix.client.connection.max.duration";
 /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/beea861b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index de0796f..39a7d7e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -265,7 +265,6 @@ public class QueryServicesOptions {
 
 public static final String DEFAULT_CLIENT_CACHE_ENCODING = PTableRefFactory.Encoding.OBJECT.toString();
 public static final boolean DEFAULT_AUTO_UPGRADE_ENABLED = true;
-public static final int DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE = 100;
 public static final int DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION = 8640;
 
 

phoenix git commit: PHOENIX-3666 Make use of EncodedColumnQualifierCellsList for all column name mapping schemes

2017-02-13 Thread samarth
Repository: phoenix
Updated Branches:
  refs/heads/encodecolumns2 e89337f83 -> a65ab0030


PHOENIX-3666 Make use of EncodedColumnQualifierCellsList for all column name mapping schemes


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a65ab003
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a65ab003
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a65ab003

Branch: refs/heads/encodecolumns2
Commit: a65ab0030eb50315d8e82948bcf334fc41cea575
Parents: e89337f
Author: Samarth 
Authored: Mon Feb 13 15:51:10 2017 -0800
Committer: Samarth 
Committed: Mon Feb 13 15:51:10 2017 -0800

--
 .../apache/phoenix/query/QueryConstants.java| 22 +++
 .../java/org/apache/phoenix/schema/PName.java   | 26 
 .../java/org/apache/phoenix/schema/PTable.java  | 62 +++-
 .../apache/phoenix/util/EncodedColumnsUtil.java |  9 ++-
 .../util/QualifierEncodingSchemeTest.java   | 10 
 5 files changed, 77 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a65ab003/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index 9f4a569..6f105f1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -48,6 +48,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_CO
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POST_KEY;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INCREMENT_BY;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_STATE;
@@ -88,7 +89,6 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SOURCE_DATA_TYPE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SQL_DATA_TYPE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SQL_DATETIME_SUB;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.START_WITH;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.STORE_NULLS;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
@@ -108,7 +108,6 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
-import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS;
 
 import java.math.BigDecimal;
 
@@ -122,6 +121,7 @@ import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.schema.MetaDataSplitPolicy;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
 import org.apache.phoenix.schema.SortOrder;
 
 
@@ -161,23 +161,19 @@ public interface QueryConstants {
 
 /** BEGIN Set of reserved column qualifiers **/
 
-public static final String RESERVED_COLUMN_FAMILY = "_r";
+public static final String RESERVED_COLUMN_FAMILY = "_v";
 public static final byte[] RESERVED_COLUMN_FAMILY_BYTES = Bytes.toBytes(RESERVED_COLUMN_FAMILY);

 public static final byte[] VALUE_COLUMN_FAMILY = RESERVED_COLUMN_FAMILY_BYTES;
-//TODO: samarth think about the implication of using the four byte scheme here. Can we just
-// get away with storing them in a single byte? We would need to make our encoding scheme
-// cognizant of the fact that all bytes may not be available making them interoperable.
-// In other words allow upper casting but not downcasting.
-public static final byte[] VALUE_COLUMN_QUALIFIER = FOUR_BYTE_QUALIFIERS.encode(1);
+public static final byte[] VALUE_COLUMN_QUALIFIER = QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS.encode(1);
 
 public static final byte[] ARRAY_VALUE_COLUMN_FAMILY = 
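As background, an encoded qualifier is a column number rendered as a fixed-width byte[] rather than a column-name string, which is what FOUR_BYTE_QUALIFIERS.encode(1) produces above. A rough standalone illustration of that general idea (this is an assumption for illustration only and is NOT Phoenix's actual byte layout):

import java.nio.ByteBuffer;

// Rough illustration of fixed-width numeric column qualifiers: a counter is
// encoded into 4 bytes and decoded back.
public class FourByteQualifierSketch {
    static byte[] encode(int qualifierCounter) {
        return ByteBuffer.allocate(4).putInt(qualifierCounter).array();
    }

    static int decode(byte[] qualifier) {
        return ByteBuffer.wrap(qualifier).getInt();
    }

    public static void main(String[] args) {
        byte[] q = encode(1);            // analogous in spirit to FOUR_BYTE_QUALIFIERS.encode(1)
        System.out.println(decode(q));   // prints 1
    }
}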

[4/4] phoenix git commit: PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)

2017-02-13 Thread apurtell
PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7567fcd6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7567fcd6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7567fcd6

Branch: refs/heads/master
Commit: 7567fcd6d569a2ece7556c4e3a966a1baf34c3a5
Parents: 8f2d0fb
Author: Andrew Purtell 
Authored: Mon Feb 13 15:24:01 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:26:17 2017 -0800

--
 .../apache/phoenix/mapreduce/AbstractBulkLoadTool.java  |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java   |  2 +-
 .../org/apache/phoenix/mapreduce/index/IndexTool.java   | 12 
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7567fcd6/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index b32f9c6..f717647 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -331,7 +331,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
 LOG.info("Loading HFiles from {}", outputPath);
 completebulkload(conf,outputPath,tablesToBeLoaded);
 LOG.info("Removing output directory {}", outputPath);
-if(!FileSystem.get(conf).delete(outputPath, true)) {
+if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
 LOG.error("Failed to delete the output directory {}", outputPath);
 }
 return 0;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7567fcd6/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 35a2bd8..da78fd5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -455,8 +455,8 @@ public class MultiHfileOutputFormat extends FileOutputFormat

[2/4] phoenix git commit: PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)

2017-02-13 Thread apurtell
PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/222388b0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/222388b0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/222388b0

Branch: refs/heads/4.x-HBase-1.1
Commit: 222388b03415caad37d858d1cd91fe79be571787
Parents: beea861
Author: Andrew Purtell 
Authored: Mon Feb 13 15:24:01 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:25:30 2017 -0800

--
 .../apache/phoenix/mapreduce/AbstractBulkLoadTool.java  |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java   |  2 +-
 .../org/apache/phoenix/mapreduce/index/IndexTool.java   | 12 
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/222388b0/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index b32f9c6..f717647 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -331,7 +331,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
 LOG.info("Loading HFiles from {}", outputPath);
 completebulkload(conf,outputPath,tablesToBeLoaded);
 LOG.info("Removing output directory {}", outputPath);
-if(!FileSystem.get(conf).delete(outputPath, true)) {
+if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
 LOG.error("Failed to delete the output directory {}", outputPath);
 }
 return 0;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/222388b0/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 35a2bd8..da78fd5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -455,8 +455,8 @@ public class MultiHfileOutputFormat extends FileOutputFormat

Jenkins build is back to normal : Phoenix-encode-columns #58

2017-02-13 Thread Apache Jenkins Server
See 



phoenix git commit: PHOENIX-3660 Don't pass statement properties while adding columns to a table that already exists that had APPEND_ONLY_SCHEMA=true

2017-02-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 f11237c8e -> 234e427b3


PHOENIX-3660 Don't pass statement properties while adding columns to a table that already exists that had APPEND_ONLY_SCHEMA=true


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/234e427b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/234e427b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/234e427b

Branch: refs/heads/4.x-HBase-0.98
Commit: 234e427b31b8b00a95e7d7dd1e5f143dce20dd16
Parents: f11237c
Author: Thomas D'Silva 
Authored: Mon Feb 13 13:35:59 2017 -0800
Committer: Thomas D'Silva 
Committed: Mon Feb 13 14:34:55 2017 -0800

--
 .../phoenix/end2end/AppendOnlySchemaIT.java | 28 +++-
 .../apache/phoenix/schema/MetaDataClient.java   |  3 ++-
 2 files changed, 23 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/234e427b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
index 7ed64ff..e9a20b3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
@@ -81,23 +81,37 @@ public class AppendOnlySchemaIT extends ParallelStatsDisabledIT {
 // create sequence for auto partition
 conn1.createStatement().execute("CREATE SEQUENCE " + 
metricIdSeqTableName + " CACHE 1");
 // create base table
-conn1.createStatement().execute("CREATE TABLE "+ metricTableName + 
"(metricId INTEGER NOT NULL, metricVal DOUBLE, CONSTRAINT PK PRIMARY 
KEY(metricId))"
-+ " APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY=1, 
AUTO_PARTITION_SEQ=" + metricIdSeqTableName);
+String ddl = "CREATE TABLE " + (notExists ? "IF NOT EXISTS " : "") 
+ metricTableName + "(metricId INTEGER NOT NULL, metricVal DOUBLE, CONSTRAINT 
PK PRIMARY KEY(metricId))"
++ " APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY=1, 
AUTO_PARTITION_SEQ=" + metricIdSeqTableName;
+   conn1.createStatement().execute(ddl);
+   // execute same create ddl
+try {
+conn2.createStatement().execute(ddl);
+if (!notExists) {
+fail("Create Table should fail");
+}
+}
+catch (TableAlreadyExistsException e) {
+if (notExists) {
+fail("Create Table should not fail");
+}
+}
+   
 // create view
-String ddl =
+String viewDDL =
 "CREATE VIEW " + (notExists ? "IF NOT EXISTS " : "")
 + viewName + " ( hostName varchar NOT NULL, 
tagName varChar"
 + " CONSTRAINT HOSTNAME_PK PRIMARY KEY (hostName))"
 + " AS SELECT * FROM " + metricTableName
 + " UPDATE_CACHE_FREQUENCY=30";
-conn1.createStatement().execute(ddl);
+conn1.createStatement().execute(viewDDL);
 conn1.createStatement().execute("UPSERT INTO " + viewName + 
"(hostName, metricVal) VALUES('host1', 1.0)");
 conn1.commit();
 reset(connectionQueryServices);
 
 // execute same create ddl
 try {
-conn2.createStatement().execute(ddl);
+conn2.createStatement().execute(viewDDL);
 if (!notExists) {
 fail("Create Table should fail");
 }
@@ -118,9 +132,9 @@ public class AppendOnlySchemaIT extends ParallelStatsDisabledIT {
 reset(connectionQueryServices);
 
 // execute alter table ddl that adds the same column
-ddl = "ALTER VIEW " + viewName + " ADD " + (notExists ? "IF NOT 
EXISTS" : "") + " tagName varchar";
+viewDDL = "ALTER VIEW " + viewName + " ADD " + (notExists ? "IF 
NOT EXISTS" : "") + " tagName varchar";
 try {
-conn2.createStatement().execute(ddl);
+conn2.createStatement().execute(viewDDL);
 if (!notExists) {
 fail("Alter Table should fail");
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/234e427b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java

Build failed in Jenkins: Phoenix-encode-columns #57

2017-02-13 Thread Apache Jenkins Server
See 

Changes:

[tdsilva] PHOENIX-3446 Parameterize tests for different encoding and storage

--
[...truncated 37255 lines...]
at 
org.apache.phoenix.end2end.StatsCollectorIT.testNoDuplicatesAfterUpdateStatsWithSplits(StatsCollectorIT.java:271)
Caused by: java.lang.Exception: Thrift error for 
org.apache.tephra.distributed.TransactionServiceClient$2@124161d7: Unable to 
discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testNoDuplicatesAfterUpdateStats(StatsCollectorIT.java:251)
at 
org.apache.phoenix.end2end.StatsCollectorIT.testNoDuplicatesAfterUpdateStatsWithSplits(StatsCollectorIT.java:271)
Caused by: org.apache.thrift.TException: Unable to discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testNoDuplicatesAfterUpdateStats(StatsCollectorIT.java:251)
at 
org.apache.phoenix.end2end.StatsCollectorIT.testNoDuplicatesAfterUpdateStatsWithSplits(StatsCollectorIT.java:271)

testUpdateEmptyStats[columnEncoded = true, mutable = true, transactional = 
false, isUserTableNamespaceMapped = 
true](org.apache.phoenix.end2end.StatsCollectorIT)  Time elapsed: 46.121 sec  
<<< ERROR!
java.lang.RuntimeException: java.lang.Exception: Thrift error for 
org.apache.tephra.distributed.TransactionServiceClient$2@9aa60e5: Unable to 
discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testUpdateEmptyStats(StatsCollectorIT.java:165)
Caused by: java.lang.Exception: Thrift error for 
org.apache.tephra.distributed.TransactionServiceClient$2@9aa60e5: Unable to 
discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testUpdateEmptyStats(StatsCollectorIT.java:165)
Caused by: org.apache.thrift.TException: Unable to discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testUpdateEmptyStats(StatsCollectorIT.java:165)

testWithMultiCF[columnEncoded = true, mutable = true, transactional = false, 
isUserTableNamespaceMapped = true](org.apache.phoenix.end2end.StatsCollectorIT) 
 Time elapsed: 46.118 sec  <<< ERROR!
java.lang.RuntimeException: java.lang.Exception: Thrift error for 
org.apache.tephra.distributed.TransactionServiceClient$2@cb09d70: Unable to 
discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testWithMultiCF(StatsCollectorIT.java:511)
Caused by: java.lang.Exception: Thrift error for 
org.apache.tephra.distributed.TransactionServiceClient$2@cb09d70: Unable to 
discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testWithMultiCF(StatsCollectorIT.java:511)
Caused by: org.apache.thrift.TException: Unable to discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testWithMultiCF(StatsCollectorIT.java:511)

testRowCountAndByteCounts[columnEncoded = true, mutable = true, transactional = 
true, isUserTableNamespaceMapped = 
false](org.apache.phoenix.end2end.StatsCollectorIT)  Time elapsed: 46.162 sec  
<<< ERROR!
java.lang.RuntimeException: java.lang.Exception: Thrift error for 
org.apache.tephra.distributed.TransactionServiceClient$2@2966e80c: Unable to 
discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testRowCountAndByteCounts(StatsCollectorIT.java:608)
Caused by: java.lang.Exception: Thrift error for 
org.apache.tephra.distributed.TransactionServiceClient$2@2966e80c: Unable to 
discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testRowCountAndByteCounts(StatsCollectorIT.java:608)
Caused by: org.apache.thrift.TException: Unable to discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testRowCountAndByteCounts(StatsCollectorIT.java:608)

testSomeUpdateEmptyStats[columnEncoded = true, mutable = true, transactional = 
true, isUserTableNamespaceMapped = 
false](org.apache.phoenix.end2end.StatsCollectorIT)  Time elapsed: 46.148 sec  
<<< ERROR!
java.lang.RuntimeException: java.lang.Exception: Thrift error for 
org.apache.tephra.distributed.TransactionServiceClient$2@2042a235: Unable to 
discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testSomeUpdateEmptyStats(StatsCollectorIT.java:181)
Caused by: java.lang.Exception: Thrift error for 
org.apache.tephra.distributed.TransactionServiceClient$2@2042a235: Unable to 
discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testSomeUpdateEmptyStats(StatsCollectorIT.java:181)
Caused by: org.apache.thrift.TException: Unable to discover tx service.
at 
org.apache.phoenix.end2end.StatsCollectorIT.testSomeUpdateEmptyStats(StatsCollectorIT.java:181)

testUpdateStatsWithMultipleTables[columnEncoded = true, mutable = true, 
transactional = true, isUserTableNamespaceMapped = 
false](org.apache.phoenix.end2end.StatsCollectorIT)  Time elapsed: 46.155 sec  
<<< ERROR!
java.lang.RuntimeException: java.lang.Exception: Thrift error for 

[3/3] phoenix git commit: PHOENIX-3446 Parameterize tests for different encoding and storage schemes

2017-02-13 Thread tdsilva
PHOENIX-3446 Parameterize tests for different encoding and storage schemes


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e89337f8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e89337f8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e89337f8

Branch: refs/heads/encodecolumns2
Commit: e89337f836922b50ab265f3009ca6cf2a07401ca
Parents: 3b6709d
Author: Thomas D'Silva 
Authored: Tue Feb 7 12:55:32 2017 -0800
Committer: Thomas D'Silva 
Committed: Mon Feb 13 11:43:32 2017 -0800

--
 .../phoenix/end2end/AggregateQueryIT.java   |  47 +-
 .../org/apache/phoenix/end2end/BaseJoinIT.java  |   4 +-
 .../org/apache/phoenix/end2end/BaseQueryIT.java |  59 +-
 .../apache/phoenix/end2end/CaseStatementIT.java |  37 +-
 .../apache/phoenix/end2end/CastAndCoerceIT.java |   4 +-
 .../end2end/ClientTimeArithmeticQueryIT.java|   4 +-
 .../end2end/CountDistinctCompressionIT.java |   2 +-
 .../org/apache/phoenix/end2end/DateTimeIT.java  |   2 +-
 .../apache/phoenix/end2end/DerivedTableIT.java  |   2 +-
 .../apache/phoenix/end2end/DistinctCountIT.java |   4 +-
 .../phoenix/end2end/ExtendedQueryExecIT.java|   8 +-
 .../apache/phoenix/end2end/FunkyNamesIT.java|   2 +-
 .../org/apache/phoenix/end2end/GroupByIT.java   | 124 +---
 .../apache/phoenix/end2end/MutableQueryIT.java  | 424 ++
 .../phoenix/end2end/NativeHBaseTypesIT.java |   2 +-
 .../org/apache/phoenix/end2end/NotQueryIT.java  |   4 +-
 .../apache/phoenix/end2end/PercentileIT.java|   4 +-
 .../phoenix/end2end/PointInTimeQueryIT.java |  64 +-
 .../phoenix/end2end/ProductMetricsIT.java   |   2 +-
 .../end2end/QueryDatabaseMetaDataIT.java|  16 +-
 .../org/apache/phoenix/end2end/QueryIT.java |  50 +-
 .../phoenix/end2end/ReadIsolationLevelIT.java   |   2 +-
 .../phoenix/end2end/RowValueConstructorIT.java  |  34 +-
 .../org/apache/phoenix/end2end/ScanQueryIT.java |  61 +-
 .../java/org/apache/phoenix/end2end/TopNIT.java |   6 +-
 .../apache/phoenix/end2end/UpsertSelectIT.java  |  10 +-
 .../apache/phoenix/end2end/UpsertValuesIT.java  |   6 +-
 .../phoenix/end2end/VariableLengthPKIT.java |  38 +-
 .../phoenix/end2end/index/ImmutableIndexIT.java |  20 +-
 .../apache/phoenix/end2end/index/IndexIT.java   |  30 +-
 .../phoenix/end2end/index/MutableIndexIT.java   |  17 +-
 .../phoenix/end2end/salted/SaltedTableIT.java   |   2 +-
 .../phoenix/tx/ParameterizedTransactionIT.java  | 518 
 .../org/apache/phoenix/tx/TransactionIT.java| 587 +++
 .../org/apache/phoenix/tx/TxCheckpointIT.java   |  42 +-
 .../compile/TupleProjectionCompiler.java|   7 +-
 .../java/org/apache/phoenix/query/BaseTest.java |  33 +-
 37 files changed, 1252 insertions(+), 1026 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e89337f8/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
index c689373..f017c00 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateQueryIT.java
@@ -22,7 +22,6 @@ import static org.apache.phoenix.util.TestUtil.A_VALUE;
 import static org.apache.phoenix.util.TestUtil.B_VALUE;
 import static org.apache.phoenix.util.TestUtil.C_VALUE;
 import static org.apache.phoenix.util.TestUtil.E_VALUE;
-import static org.apache.phoenix.util.TestUtil.ROW3;
 import static org.apache.phoenix.util.TestUtil.ROW4;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
@@ -47,50 +46,8 @@ import org.junit.Test;
 
 public class AggregateQueryIT extends BaseQueryIT {
 
-public AggregateQueryIT(String indexDDL) {
-super(indexDDL);
-}
-
-@Test
-public void testSumOverNullIntegerColumn() throws Exception {
-String query = "SELECT sum(a_integer) FROM aTable a";
-Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 20));
-Connection conn = DriverManager.getConnection(getUrl(), props);
-conn.setAutoCommit(true);
-conn.createStatement().execute("UPSERT INTO 
atable(organization_id,entity_id,a_integer) VALUES('" + getOrganizationId() + 
"','" + ROW3 + "',NULL)");
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 30));
-Connection conn1 = DriverManager.getConnection(getUrl(), props);
-analyzeTable(conn1, "ATABLE");
-

[2/3] phoenix git commit: PHOENIX-3446 Parameterize tests for different encoding and storage schemes

2017-02-13 Thread tdsilva
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e89337f8/phoenix-core/src/it/java/org/apache/phoenix/end2end/ScanQueryIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ScanQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ScanQueryIT.java
index 9b28bad..b553816 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ScanQueryIT.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.end2end;
 import static org.apache.phoenix.util.TestUtil.A_VALUE;
 import static org.apache.phoenix.util.TestUtil.B_VALUE;
 import static org.apache.phoenix.util.TestUtil.C_VALUE;
-import static org.apache.phoenix.util.TestUtil.E_VALUE;
 import static org.apache.phoenix.util.TestUtil.ROW1;
 import static org.apache.phoenix.util.TestUtil.ROW2;
 import static org.apache.phoenix.util.TestUtil.ROW3;
@@ -39,10 +38,8 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Properties;
 import java.util.Set;
 
@@ -53,7 +50,6 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import com.google.common.collect.Lists;
 import com.google.common.primitives.Doubles;
 import com.google.common.primitives.Floats;
 
@@ -66,8 +62,8 @@ public class ScanQueryIT extends BaseQueryIT {
 return QueryIT.data();
 }
 
-public ScanQueryIT(String indexDDL) {
-super(indexDDL);
+public ScanQueryIT(String indexDDL, boolean mutable, boolean columnEncoded) {
+super(indexDDL, mutable, columnEncoded);
 }
 
 @Test
@@ -440,57 +436,4 @@ public class ScanQueryIT extends BaseQueryIT {
 conn.close();
 }
 }
-
-@SuppressWarnings("unchecked")
-@Test
-public void testPointInTimeLimitedScan() throws Exception {
-// Override value that was set at creation time
-String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 1); // Run query at timestamp 5
-Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-Connection upsertConn = DriverManager.getConnection(url, props);
-String upsertStmt =
-"upsert into " +
-"ATABLE(" +
-"ORGANIZATION_ID, " +
-"ENTITY_ID, " +
-"A_INTEGER) " +
-"VALUES (?, ?, ?)";
-upsertConn.setAutoCommit(true); // Test auto commit
-// Insert all rows at ts
-PreparedStatement stmt = upsertConn.prepareStatement(upsertStmt);
-stmt.setString(1, tenantId);
-stmt.setString(2, ROW1);
-stmt.setInt(3, 6);
-stmt.execute(); // should commit too
-upsertConn.close();
-
-// Override value again, but should be ignored since it's past the SCN
-url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 3);
-upsertConn = DriverManager.getConnection(url, props);
-upsertConn.setAutoCommit(true); // Test auto commit
-// Insert all rows at ts
-stmt = upsertConn.prepareStatement(upsertStmt);
-stmt.setString(1, tenantId);
-stmt.setString(2, ROW1);
-stmt.setInt(3, 0);
-stmt.execute(); // should commit too
-upsertConn.close();
-
-String query = "SELECT a_integer,b_string FROM atable WHERE 
organization_id=? and a_integer <= 5 limit 2";
-props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2));
-Connection conn = DriverManager.getConnection(getUrl(), props);
-PreparedStatement statement = conn.prepareStatement(query);
-statement.setString(1, tenantId);
-ResultSet rs = statement.executeQuery();
-List<List<Object>> expectedResultsA = Lists.newArrayList(
-Arrays.asList(2, C_VALUE),
-Arrays.asList( 3, E_VALUE));
-List<List<Object>> expectedResultsB = Lists.newArrayList(
-Arrays.asList( 5, C_VALUE),
-Arrays.asList(4, B_VALUE));
-// Since we're not ordering and we may be using a descending index, we don't
-// know which rows we'll get back.
-assertOneOfValuesEqualsResultSet(rs, expectedResultsA,expectedResultsB);
-   conn.close();
-}
 }
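
(For context on the constructor change above: these suites run under JUnit 4's Parameterized runner, and the shared provider in QueryIT.data() supplies each {indexDDL, mutable, columnEncoded} combination. The sketch below only illustrates the shape such a provider could take; the class name and the DDL/boolean literals are made up for illustration and are not the values actually returned by QueryIT.)

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.runners.Parameterized.Parameters;

    public class EncodingParamsSketch {
        // Each row is {indexDDL, mutable, columnEncoded}; the literals are illustrative only.
        @Parameters(name = "indexDDL={0},mutable={1},columnEncoded={2}")
        public static Collection<Object[]> data() {
            return Arrays.asList(new Object[][] {
                { "", true, false },
                { "", true, true },
                { "CREATE INDEX idx ON aTable(a_integer)", false, true },
            });
        }
    }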

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e89337f8/phoenix-core/src/it/java/org/apache/phoenix/end2end/TopNIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TopNIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TopNIT.java
index ca1cd86..39e8cb6 100644
--- 

[1/3] phoenix git commit: PHOENIX-3446 Parameterize tests for different encoding and storage schemes

2017-02-13 Thread tdsilva
Repository: phoenix
Updated Branches:
  refs/heads/encodecolumns2 3b6709d05 -> e89337f83


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e89337f8/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index 0521159..5fa4dfc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -26,337 +26,110 @@ import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
-import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
-import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.coprocessor.PhoenixTransactionalProcessor;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.schema.types.PInteger;
-import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.apache.tephra.TxConstants;
-import org.junit.Ignore;
 import org.junit.Test;
 
-import com.google.common.collect.Lists;
+public class TransactionIT  extends ParallelStatsDisabledIT {
 
-public class TransactionIT extends ParallelStatsDisabledIT {
-
 @Test
-public void testReadOwnWrites() throws Exception {
-String transTableName = generateUniqueName();
-String fullTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + transTableName;
-String selectSql = "SELECT * FROM "+ fullTableName;
-try (Connection conn = DriverManager.getConnection(getUrl())) {
-TestUtil.createTransactionalTable(conn, fullTableName);
-conn.setAutoCommit(false);
-ResultSet rs = conn.createStatement().executeQuery(selectSql);
-assertFalse(rs.next());
-
-String upsert = "UPSERT INTO " + fullTableName + "(varchar_pk, 
char_pk, int_pk, long_pk, decimal_pk, date_pk) VALUES(?, ?, ?, ?, ?, ?)";
-PreparedStatement stmt = conn.prepareStatement(upsert);
-// upsert two rows
-TestUtil.setRowKeyColumns(stmt, 1);
-stmt.execute();
-TestUtil.setRowKeyColumns(stmt, 2);
-stmt.execute();
-
-// verify rows can be read even though commit has not been called
-rs = conn.createStatement().executeQuery(selectSql);
-TestUtil.validateRowKeyColumns(rs, 1);
-TestUtil.validateRowKeyColumns(rs, 2);
-assertFalse(rs.next());
-
-conn.commit();
-
-// verify rows can be read after commit
-rs = conn.createStatement().executeQuery(selectSql);
-TestUtil.validateRowKeyColumns(rs, 1);
-TestUtil.validateRowKeyColumns(rs, 2);
-assertFalse(rs.next());
-}
+public void testReCreateTxnTableAfterDroppingExistingNonTxnTable() throws SQLException {
+String tableName = generateUniqueName();
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(false);
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE " + tableName + "(k VARCHAR PRIMARY KEY, v1 
VARCHAR, v2 VARCHAR)");
+stmt.execute("DROP TABLE " + tableName);
+stmt.execute("CREATE TABLE " + tableName + "(k VARCHAR PRIMARY KEY, v1 
VARCHAR, v2 VARCHAR) TRANSACTIONAL=true");
+stmt.execute("CREATE INDEX " + tableName + "_IDX ON " + tableName + " 
(v1) INCLUDE(v2)");
+assertTrue(conn.unwrap(PhoenixConnection.class).getTable(new 
PTableKey(null, tableName)).isTransactional());
+assertTrue(conn.unwrap(PhoenixConnection.class).getTable(new 
PTableKey(null,  tableName + "_IDX")).isTransactional());
 }
 
 @Test
-public 

Jenkins build is back to normal : Phoenix | 4.x-HBase-0.98 #1441

2017-02-13 Thread Apache Jenkins Server
See 



Build failed in Jenkins: Phoenix-4.x-HBase-1.1 #326

2017-02-13 Thread Apache Jenkins Server
See 

Changes:

[jmahonin] PHOENIX-3600 Core MapReduce classes don't provide location info

[jmahonin] PHOENIX-3601 PhoenixRDD doesn't expose the preferred node locations to

--
[...truncated 817 lines...]
Running org.apache.phoenix.end2end.SequenceIT
Running org.apache.phoenix.end2end.SequenceBulkAllocationIT
Tests run: 119, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 69.973 sec - in org.apache.phoenix.end2end.ScanQueryIT
Tests run: 56, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 32.096 sec - in org.apache.phoenix.end2end.SequenceBulkAllocationIT
Running org.apache.phoenix.end2end.TopNIT
Tests run: 126, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 69.975 sec - in org.apache.phoenix.end2end.QueryIT
Running org.apache.phoenix.end2end.TruncateFunctionIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.74 sec - in org.apache.phoenix.end2end.TopNIT
Running org.apache.phoenix.end2end.UpsertSelectIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.221 sec - in org.apache.phoenix.end2end.TruncateFunctionIT
Running org.apache.phoenix.end2end.UpsertValuesIT
Tests run: 54, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 76.736 sec - in org.apache.phoenix.end2end.SequenceIT
Running org.apache.phoenix.end2end.VariableLengthPKIT
Running org.apache.phoenix.end2end.ToNumberFunctionIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.608 sec - in org.apache.phoenix.end2end.ToNumberFunctionIT
Running org.apache.phoenix.end2end.salted.SaltedTableIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.076 sec - in org.apache.phoenix.end2end.salted.SaltedTableIT
Running org.apache.phoenix.rpc.UpdateCacheWithScnIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.448 sec - in org.apache.phoenix.rpc.UpdateCacheWithScnIT
Tests run: 19, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 178.492 sec - in org.apache.phoenix.end2end.QueryDatabaseMetaDataIT
Tests run: 50, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 67.67 sec - in org.apache.phoenix.end2end.VariableLengthPKIT
Tests run: 46, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 179.222 sec - in org.apache.phoenix.end2end.RowValueConstructorIT
Tests run: 25, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 152.632 sec - in org.apache.phoenix.end2end.UpsertValuesIT
Tests run: 22, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 191.362 sec - in org.apache.phoenix.end2end.UpsertSelectIT

Results :

Tests run: 1359, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(HBaseManagedTimeTests) @ phoenix-core ---

---
 T E S T S
---

Results :

Tests run: 0, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(NeedTheirOwnClusterTests) @ phoenix-core ---

---
 T E S T S
---
Running org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
Running org.apache.phoenix.end2end.ConnectionUtilIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 33.111 sec - in org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
Running org.apache.phoenix.end2end.CsvBulkLoadToolIT
Running org.apache.phoenix.end2end.AutomaticRebuildIT
Running org.apache.phoenix.end2end.CountDistinctCompressionIT
Running org.apache.phoenix.end2end.ContextClassloaderIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 37.582 sec - in org.apache.phoenix.end2end.ConnectionUtilIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.475 sec - in org.apache.phoenix.end2end.ContextClassloaderIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.076 sec - in org.apache.phoenix.end2end.CountDistinctCompressionIT
Running org.apache.phoenix.end2end.FlappingLocalIndexIT
Running org.apache.phoenix.end2end.IndexToolForPartialBuildIT
Running org.apache.phoenix.end2end.IndexExtendedIT
Running org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 34.946 sec - in org.apache.phoenix.end2end.IndexToolForPartialBuildIT
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 95.87 sec - in org.apache.phoenix.end2end.CsvBulkLoadToolIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 61.472 sec - in org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
Running org.apache.phoenix.end2end.QueryTimeoutIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 102.601 sec - in org.apache.phoenix.end2end.FlappingLocalIndexIT
Running 

Apache Phoenix - Timeout crawler - Build https://builds.apache.org/job/Phoenix-master/1550/

2017-02-13 Thread Apache Jenkins Server
[...truncated 16 lines...]


phoenix git commit: PHOENIX-3600 Addendum to rework classes for HBase 0.98

2017-02-13 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 c1027f17f -> f11237c8e


PHOENIX-3600 Addendum to rework classes for HBase 0.98


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f11237c8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f11237c8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f11237c8

Branch: refs/heads/4.x-HBase-0.98
Commit: f11237c8e191ce97c2f21ff8e8faeb2aa8e2b6d8
Parents: c1027f1
Author: Josh Mahonin 
Authored: Mon Feb 13 11:27:55 2017 -0500
Committer: Josh Mahonin 
Committed: Mon Feb 13 11:27:55 2017 -0500

--
 .../apache/phoenix/mapreduce/PhoenixInputFormat.java   | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f11237c8/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index 14f7b94..595d623 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -94,17 +94,18 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWritable, T>
 final List<InputSplit> psplits = Lists.newArrayListWithExpectedSize(splits.size());
 for (List<Scan> scans : qplan.getScans()) {
 // Get the region location
-HRegionLocation location = regionLocator.getRegionLocation(
+HRegionLocation location = connection.getRegionLocation(
+TableName.valueOf(tableName),
 scans.get(0).getStartRow(),
 false
 );



Build failed in Jenkins: Phoenix | 4.x-HBase-0.98 #1440

2017-02-13 Thread Apache Jenkins Server
See 

Changes:

[jmahonin] PHOENIX-3600 Core MapReduce classes don't provide location info

[jmahonin] PHOENIX-3601 PhoenixRDD doesn't expose the preferred node locations to

--
[...truncated 34911 lines...]
[INFO] --- maven-dependency-plugin:2.1:build-classpath 
(create-phoenix-generated-classpath) @ phoenix-core ---
Downloading: 
https://repo.maven.apache.org/maven2/org/apache/maven/shared/maven-dependency-analyzer/1.1/maven-dependency-analyzer-1.1.pom
Downloaded: 
https://repo.maven.apache.org/maven2/org/apache/maven/shared/maven-dependency-analyzer/1.1/maven-dependency-analyzer-1.1.pom
 (5 KB at 40.3 KB/sec)
Downloading: 
https://repo.maven.apache.org/maven2/org/apache/maven/shared/maven-dependency-analyzer/1.1/maven-dependency-analyzer-1.1.jar
Downloading: 
https://repo.maven.apache.org/maven2/plexus/plexus-utils/1.0.2/plexus-utils-1.0.2.jar


Downloaded: 
https://repo.maven.apache.org/maven2/org/apache/maven/shared/maven-dependency-analyzer/1.1/maven-dependency-analyzer-1.1.jar
 (27 KB at 235.8 KB/sec)
Downloaded: 
https://repo.maven.apache.org/maven2/plexus/plexus-utils/1.0.2/plexus-utils-1.0.2.jar
 (157 KB at 1411.0 KB/sec)
[INFO] Wrote classpath file 
'
[INFO] 
[INFO] --- maven-remote-resources-plugin:1.5:process (default) @ phoenix-core 
---
Downloading: 
https://repository.apache.org/content/repositories/releases/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar
Downloading: 
https://repository.apache.org/content/repositories/releases/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar


Downloading: 
https://repo.maven.apache.org/maven2/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar
Downloading: 
https://repo.maven.apache.org/maven2/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar
Downloaded: 
https://repo.maven.apache.org/maven2/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar
 (9 KB at 84.9 KB/sec)
  
Downloaded: 
https://repo.maven.apache.org/maven2/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar
 (128 KB at 965.2 KB/sec)
[INFO] 
[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ 
phoenix-core ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 1 resource to META-INF/services
[INFO] Copying 3 resources
[INFO] 
[INFO] --- maven-compiler-plugin:3.0:compile (default-compile) @ phoenix-core 
---
Downloading: 

[2/2] phoenix git commit: PHOENIX-3601 PhoenixRDD doesn't expose the preferred node locations to Spark

2017-02-13 Thread jmahonin
PHOENIX-3601 PhoenixRDD doesn't expose the preferred node locations to Spark


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c1027f17
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c1027f17
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c1027f17

Branch: refs/heads/4.x-HBase-0.98
Commit: c1027f17facad8870835b880767312e15be7e651
Parents: e1b1cd8
Author: Josh Mahonin 
Authored: Mon Feb 13 10:58:02 2017 -0500
Committer: Josh Mahonin 
Committed: Mon Feb 13 11:04:45 2017 -0500

--
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c1027f17/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 01a9077..63547d2 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -55,6 +55,10 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 phoenixRDD.partitions
   }
 
+  override protected def getPreferredLocations(split: Partition): Seq[String] = {
+phoenixRDD.preferredLocations(split)
+  }
+
   @DeveloperApi
   override def compute(split: Partition, context: TaskContext) = {
 phoenixRDD.compute(split, context).map(r => r._2)



[1/2] phoenix git commit: PHOENIX-3600 Core MapReduce classes don't provide location info

2017-02-13 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 dc3536e74 -> dbb0c1ea0


PHOENIX-3600 Core MapReduce classes don't provide location info

This mostly just ports the same functionality in the phoenix-hive MR
classes to the main classes. Adds a new configuration parameter
'phoenix.mapreduce.split.by.stats', defaulting to true, to create
input splits based off the scans provided by statistics, not just the
region locations.
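
(For anyone who wants to try the new knob, here is a minimal, illustrative driver sketch; it is not part of the patch. The class name, job name and the remaining job wiring are placeholders, and the only assumption taken from the commit is the property key named above and its default of true.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class SplitByStatsExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // 'phoenix.mapreduce.split.by.stats' defaults to true: one input split per
            // statistics-guided scan. Setting it to false falls back to the previous
            // behaviour of one split per region.
            conf.setBoolean("phoenix.mapreduce.split.by.stats", false);
            Job job = Job.getInstance(conf, "phoenix-split-by-stats-example");
            // ... configure the Phoenix input format, mapper and output as usual ...
        }
    }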


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fc4f9783
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fc4f9783
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fc4f9783

Branch: refs/heads/4.x-HBase-1.1
Commit: fc4f97832b32c3c25cd8ad0a89a36393da32608b
Parents: dc3536e
Author: Josh Mahonin 
Authored: Mon Feb 13 10:55:06 2017 -0500
Committer: Josh Mahonin 
Committed: Mon Feb 13 11:03:33 2017 -0500

--
 .../phoenix/mapreduce/PhoenixInputFormat.java   | 69 ++--
 .../phoenix/mapreduce/PhoenixInputSplit.java| 23 ++-
 .../util/PhoenixConfigurationUtil.java  | 11 
 3 files changed, 96 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc4f9783/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index df96c7b..14f7b94 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -21,14 +21,18 @@ import java.io.IOException;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.*;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RegionSizeCalculator;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -42,6 +46,7 @@ import 
org.apache.phoenix.iterate.MapReduceParallelScanGrouper;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.util.PhoenixRuntime;
 
@@ -80,16 +85,72 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWritable, T>
 final List<KeyRange> allSplits = queryPlan.getSplits();
-final List<InputSplit> splits = generateSplits(queryPlan,allSplits);
+final List<InputSplit> splits = generateSplits(queryPlan, allSplits, configuration);
 return splits;
 }
 
-private List<InputSplit> generateSplits(final QueryPlan qplan, final List<KeyRange> splits) throws IOException {
+private List<InputSplit> generateSplits(final QueryPlan qplan, final List<KeyRange> splits, Configuration config) throws IOException {
 Preconditions.checkNotNull(qplan);
 Preconditions.checkNotNull(splits);
+
+// Get the RegionSizeCalculator
+org.apache.hadoop.hbase.client.Connection connection = ConnectionFactory.createConnection(config);
+RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(qplan
+.getTableRef().getTable().getPhysicalName().toString()));
+RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(regionLocator, connection
+.getAdmin());
+
+
 final List<InputSplit> psplits = Lists.newArrayListWithExpectedSize(splits.size());
 for (List<Scan> scans : qplan.getScans()) {
-psplits.add(new PhoenixInputSplit(scans));
+// Get the region location
+HRegionLocation location = regionLocator.getRegionLocation(
+scans.get(0).getStartRow(),
+false
+);
+
+String regionLocation = location.getHostname();
+
+// Get the region size
+long regionSize = sizeCalculator.getRegionSize(
+location.getRegionInfo().getRegionName()
+);
+
+// Generate splits based off statistics, or just region splits?
+boolean splitByStats = PhoenixConfigurationUtil.getSplitByStats(config);

[1/2] phoenix git commit: PHOENIX-3600 Core MapReduce classes don't provide location info

2017-02-13 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 44dc576cf -> c1027f17f


PHOENIX-3600 Core MapReduce classes don't provide location info

This mostly just ports the same functionality in the phoenix-hive MR
classes to the main classes. Adds a new configuration parameter
'phoenix.mapreduce.split.by.stats', defaulting to true, to create
input splits based off the scans provided by statistics, not just the
region locations.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e1b1cd87
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e1b1cd87
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e1b1cd87

Branch: refs/heads/4.x-HBase-0.98
Commit: e1b1cd8733d7adfca3a17899630c73881af187f1
Parents: 44dc576
Author: Josh Mahonin 
Authored: Mon Feb 13 10:55:06 2017 -0500
Committer: Josh Mahonin 
Committed: Mon Feb 13 11:04:40 2017 -0500

--
 .../phoenix/mapreduce/PhoenixInputFormat.java   | 69 ++--
 .../phoenix/mapreduce/PhoenixInputSplit.java| 23 ++-
 .../util/PhoenixConfigurationUtil.java  | 11 
 3 files changed, 96 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1b1cd87/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index df96c7b..14f7b94 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -21,14 +21,18 @@ import java.io.IOException;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.*;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RegionSizeCalculator;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -42,6 +46,7 @@ import 
org.apache.phoenix.iterate.MapReduceParallelScanGrouper;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.util.PhoenixRuntime;
 
@@ -80,16 +85,72 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWritable, T>
 final List<KeyRange> allSplits = queryPlan.getSplits();
-final List<InputSplit> splits = generateSplits(queryPlan,allSplits);
+final List<InputSplit> splits = generateSplits(queryPlan, allSplits, configuration);
 return splits;
 }
 
-private List<InputSplit> generateSplits(final QueryPlan qplan, final List<KeyRange> splits) throws IOException {
+private List<InputSplit> generateSplits(final QueryPlan qplan, final List<KeyRange> splits, Configuration config) throws IOException {
 Preconditions.checkNotNull(qplan);
 Preconditions.checkNotNull(splits);
+
+// Get the RegionSizeCalculator
+org.apache.hadoop.hbase.client.Connection connection = ConnectionFactory.createConnection(config);
+RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(qplan
+.getTableRef().getTable().getPhysicalName().toString()));
+RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(regionLocator, connection
+.getAdmin());
+
+
 final List<InputSplit> psplits = Lists.newArrayListWithExpectedSize(splits.size());
 for (List<Scan> scans : qplan.getScans()) {
-psplits.add(new PhoenixInputSplit(scans));
+// Get the region location
+HRegionLocation location = regionLocator.getRegionLocation(
+scans.get(0).getStartRow(),
+false
+);
+
+String regionLocation = location.getHostname();
+
+// Get the region size
+long regionSize = sizeCalculator.getRegionSize(
+location.getRegionInfo().getRegionName()
+);
+
+// Generate splits based off statistics, or just region splits?
+boolean splitByStats = PhoenixConfigurationUtil.getSplitByStats(config);

[2/2] phoenix git commit: PHOENIX-3601 PhoenixRDD doesn't expose the preferred node locations to Spark

2017-02-13 Thread jmahonin
PHOENIX-3601 PhoenixRDD doesn't expose the preferred node locations to Spark


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/dbb0c1ea
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/dbb0c1ea
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/dbb0c1ea

Branch: refs/heads/4.x-HBase-1.1
Commit: dbb0c1ea0ec59c3ae2e6ce7391c289b621bd42dd
Parents: fc4f978
Author: Josh Mahonin 
Authored: Mon Feb 13 10:58:02 2017 -0500
Committer: Josh Mahonin 
Committed: Mon Feb 13 11:03:42 2017 -0500

--
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/dbb0c1ea/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 01a9077..63547d2 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -55,6 +55,10 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 phoenixRDD.partitions
   }
 
+  override protected def getPreferredLocations(split: Partition): Seq[String] = {
+phoenixRDD.preferredLocations(split)
+  }
+
   @DeveloperApi
   override def compute(split: Partition, context: TaskContext) = {
 phoenixRDD.compute(split, context).map(r => r._2)



[2/2] phoenix git commit: PHOENIX-3601 PhoenixRDD doesn't expose the preferred node locations to Spark

2017-02-13 Thread jmahonin
PHOENIX-3601 PhoenixRDD doesn't expose the preferred node locations to Spark


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8f2d0fbc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8f2d0fbc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8f2d0fbc

Branch: refs/heads/master
Commit: 8f2d0fbc5e4d14dc04c2491d78cea1a4b93be0b7
Parents: 267323d
Author: Josh Mahonin 
Authored: Mon Feb 13 10:58:02 2017 -0500
Committer: Josh Mahonin 
Committed: Mon Feb 13 10:58:02 2017 -0500

--
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8f2d0fbc/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 01a9077..63547d2 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -55,6 +55,10 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 phoenixRDD.partitions
   }
 
+  override protected def getPreferredLocations(split: Partition): Seq[String] = {
+phoenixRDD.preferredLocations(split)
+  }
+
   @DeveloperApi
   override def compute(split: Partition, context: TaskContext) = {
 phoenixRDD.compute(split, context).map(r => r._2)



[1/2] phoenix git commit: PHOENIX-3600 Core MapReduce classes don't provide location info

2017-02-13 Thread jmahonin
Repository: phoenix
Updated Branches:
  refs/heads/master 41d6349bd -> 8f2d0fbc5


PHOENIX-3600 Core MapReduce classes don't provide location info

This mostly just ports the same functionality in the phoenix-hive MR
classes to the main classes. Adds a new configuration parameter
'phoenix.mapreduce.split.by.stats', defaulting to true, to create
input splits based off the scans provided by statistics, not just the
region locations.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/267323da
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/267323da
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/267323da

Branch: refs/heads/master
Commit: 267323da8242fb6f0953c1a75cf96c5fde3d49ed
Parents: 41d6349
Author: Josh Mahonin 
Authored: Mon Feb 13 10:55:06 2017 -0500
Committer: Josh Mahonin 
Committed: Mon Feb 13 10:55:06 2017 -0500

--
 .../phoenix/mapreduce/PhoenixInputFormat.java   | 69 ++--
 .../phoenix/mapreduce/PhoenixInputSplit.java| 23 ++-
 .../util/PhoenixConfigurationUtil.java  | 11 
 3 files changed, 96 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/267323da/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index df96c7b..14f7b94 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -21,14 +21,18 @@ import java.io.IOException;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.*;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RegionSizeCalculator;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -42,6 +46,7 @@ import 
org.apache.phoenix.iterate.MapReduceParallelScanGrouper;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.util.PhoenixRuntime;
 
@@ -80,16 +85,72 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWritable, T>
 final List<KeyRange> allSplits = queryPlan.getSplits();
-final List<InputSplit> splits = generateSplits(queryPlan,allSplits);
+final List<InputSplit> splits = generateSplits(queryPlan, allSplits, configuration);
 return splits;
 }
 
-private List<InputSplit> generateSplits(final QueryPlan qplan, final List<KeyRange> splits) throws IOException {
+private List<InputSplit> generateSplits(final QueryPlan qplan, final List<KeyRange> splits, Configuration config) throws IOException {
 Preconditions.checkNotNull(qplan);
 Preconditions.checkNotNull(splits);
+
+// Get the RegionSizeCalculator
+org.apache.hadoop.hbase.client.Connection connection = ConnectionFactory.createConnection(config);
+RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(qplan
+.getTableRef().getTable().getPhysicalName().toString()));
+RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(regionLocator, connection
+.getAdmin());
+
+
 final List<InputSplit> psplits = Lists.newArrayListWithExpectedSize(splits.size());
 for (List<Scan> scans : qplan.getScans()) {
-psplits.add(new PhoenixInputSplit(scans));
+// Get the region location
+HRegionLocation location = regionLocator.getRegionLocation(
+scans.get(0).getStartRow(),
+false
+);
+
+String regionLocation = location.getHostname();
+
+// Get the region size
+long regionSize = sizeCalculator.getRegionSize(
+location.getRegionInfo().getRegionName()
+);
+
+// Generate splits based off statistics, or just region splits?
+boolean splitByStats = PhoenixConfigurationUtil.getSplitByStats(config);
+
+  

Jenkins build is back to normal : Phoenix Compile Compatibility with HBase #202

2017-02-13 Thread Apache Jenkins Server
See