[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5298 Branch 4.x-HBase-1.5 should include Apache snapshots in dependency resolution

2019-05-24 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 55af6da  PHOENIX-5298 Branch 4.x-HBase-1.5 should include Apache 
snapshots in dependency resolution
55af6da is described below

commit 55af6da21849941820ff58df97d41e68d4c07384
Author: Andrew Purtell 
AuthorDate: Fri May 24 14:24:11 2019 -0700

PHOENIX-5298 Branch 4.x-HBase-1.5 should include Apache snapshots in 
dependency resolution

Include Apache snapshots repository until 1.5.0 is released.
---
 pom.xml | 4 
 1 file changed, 4 insertions(+)

diff --git a/pom.xml b/pom.xml
index dfc2bf5..504c75c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -36,6 +36,10 @@
   apache release
   https://repository.apache.org/content/repositories/releases/
 
+
+  apache.snapshots
+  https://repository.apache.org/snapshots/
+
   
 
   



[phoenix] 01/01: PHOENIX-5277 Fixups for interface changes in HBase 1.5

2019-05-13 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 75d3bc0518aea777f665367ea5043a04bd098461
Author: Andrew Purtell 
AuthorDate: Fri May 10 10:44:23 2019 -0700

PHOENIX-5277 Fixups for interface changes in HBase 1.5
---
 dev/test-patch.properties  |  3 +-
 phoenix-assembly/pom.xml   |  2 +-
 phoenix-client/pom.xml |  2 +-
 phoenix-core/pom.xml   |  2 +-
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java  | 18 ++--
 .../coprocessor/PhoenixAccessController.java   | 24 +--
 .../PhoenixMetaDataCoprocessorHost.java| 34 +-
 phoenix-pherf/pom.xml  |  2 +-
 phoenix-server/pom.xml |  2 +-
 phoenix-tracing-webapp/pom.xml |  2 +-
 pom.xml|  4 +--
 11 files changed, 59 insertions(+), 36 deletions(-)

diff --git a/dev/test-patch.properties b/dev/test-patch.properties
index b3dc46f..73ebbe1 100644
--- a/dev/test-patch.properties
+++ b/dev/test-patch.properties
@@ -27,8 +27,7 @@ MAX_LINE_LENGTH=100
 # All supported branches for testing with precommit build
 # be sure to consider branch name prefixes in the order, ie, 4.x should appear
 # before 4 since the latter is a prefix
-BRANCH_NAMES="4.x-HBase-1.2 4.x-HBase-1.3 4.x-HBase-1.4 master"
-
+BRANCH_NAMES="4.x-HBase-1.3 4.x-HBase-1.4 4.x-HBase-1.5 master"
 
 # All supported Hadoop versions that we want to test the compilation with
 HADOOP2_VERSIONS="2.6.5 2.7.3 2.8.0"
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 2ee0ad8..2991ff1 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.15.0-HBase-1.4-SNAPSHOT
+4.15.0-HBase-1.5-SNAPSHOT
   
   phoenix-assembly
   Phoenix Assembly
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 9143b9a..15b6ddc 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.15.0-HBase-1.4-SNAPSHOT
+4.15.0-HBase-1.5-SNAPSHOT
   
   phoenix-client
   Phoenix Client
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 12b291c..e08445f 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   
 org.apache.phoenix
 phoenix
-4.15.0-HBase-1.4-SNAPSHOT
+4.15.0-HBase-1.5-SNAPSHOT
   
   phoenix-core
   Phoenix Core
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
index 888084d..cb293ae 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
@@ -164,6 +164,20 @@ public class PhoenixRpcScheduler extends RpcScheduler {
 public int getActiveScanRpcHandlerCount() {
 return delegate.getActiveScanRpcHandlerCount();
 }
-
-
+
+@Override
+public int getActiveGeneralRpcHandlerCount() {
+return delegate.getActiveGeneralRpcHandlerCount();
+}
+
+@Override
+public int getActivePriorityRpcHandlerCount() {
+return delegate.getActivePriorityRpcHandlerCount();
+}
+
+@Override
+public int getActiveReplicationRpcHandlerCount() {
+return delegate.getActiveReplicationRpcHandlerCount();
+}
+
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 1ef38bd..59d3036 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -138,7 +138,7 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 htd.addFamily(new HColumnDescriptor(familyName));
 }
 for (BaseMasterAndRegionObserver observer : 
getAccessControllers()) {
-observer.preCreateTable(new 
ObserverContext(), htd, null);
+observer.preCreateTable(new 
ObserverContext(getActiveUser()), htd, null);
 }
 }
 
@@ -317,11 +317,11 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 
 for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
 if (tableType != PTableType.VIEW) {
-observer.preDeleteTable(new 
ObserverContext(), physicalTableName);
+observer.preDeleteTable(new 
ObserverContext(getAct

[phoenix] branch 4.x-HBase-1.5 created (now 75d3bc0)

2019-05-13 Thread apurtell
This is an automated email from the ASF dual-hosted git repository.

apurtell pushed a change to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at 75d3bc0  PHOENIX-5277 Fixups for interface changes in HBase 1.5

This branch includes the following new commits:

 new 75d3bc0  PHOENIX-5277 Fixups for interface changes in HBase 1.5

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




phoenix git commit: PHOENIX-4231 Support restriction of remote UDF load sources

2018-03-14 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 3f892a4ed -> f49c8a724


PHOENIX-4231 Support restriction of remote UDF load sources

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f49c8a72
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f49c8a72
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f49c8a72

Branch: refs/heads/5.x-HBase-2.0
Commit: f49c8a7240f3ea705c6e07ec43df9a8d9577ad83
Parents: 3f892a4
Author: aertoria 
Authored: Tue Feb 27 15:02:07 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Mar 14 14:55:03 2018 -0700

--
 .../phoenix/end2end/UserDefinedFunctionsIT.java | 127 +--
 .../expression/function/UDFExpression.java  |  20 +--
 2 files changed, 120 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f49c8a72/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index f58f750..943119d 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -27,15 +27,12 @@ import static 
org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PAR
 import static org.apache.phoenix.util.TestUtil.JOIN_ITEM_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.LOCALHOST;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -53,6 +50,10 @@ import java.util.jar.Manifest;
 import javax.tools.JavaCompiler;
 import javax.tools.ToolProvider;
 
+import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -66,13 +67,15 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
+import org.junit.rules.TestName;
 
 public class UserDefinedFunctionsIT extends BaseOwnClusterIT {
-
 protected static final String TENANT_ID = "ZZTop";
 private static String url;
 private static PhoenixTestDriver driver;
@@ -190,10 +193,36 @@ public class UserDefinedFunctionsIT extends 
BaseOwnClusterIT {
 private static String GETY_CLASSNAME_PROGRAM = getProgram(GETY_CLASSNAME, 
GETY_EVALUATE_METHOD, "return PInteger.INSTANCE;");
 private static Properties EMPTY_PROPS = new Properties();
 
+@Rule
+public TestName name = new TestName();
 
 @Override
 @After
-public void cleanUpAfterTest() throws Exception {}
+public void cleanUpAfterTest() throws Exception {
+Connection conn = driver.connect(url, EMPTY_PROPS);
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery("list jars");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar1.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar2.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar3.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar4.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar5.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar6.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar7.jar'");
+

[1/4] phoenix git commit: PHOENIX-4231 Support restriction of remote UDF load sources

2018-03-14 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 8f8209dcf -> ade93c9d5
  refs/heads/4.x-HBase-1.2 8c830a790 -> be634b576
  refs/heads/4.x-HBase-1.3 c115b6a3e -> ae3618ff8
  refs/heads/master 274c7be94 -> 74228aee7


PHOENIX-4231 Support restriction of remote UDF load sources

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/74228aee
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/74228aee
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/74228aee

Branch: refs/heads/master
Commit: 74228aee724e24ddb00bef2be0c7430172b699a8
Parents: 274c7be
Author: aertoria 
Authored: Tue Feb 27 15:02:07 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Mar 14 12:42:18 2018 -0700

--
 .../phoenix/end2end/UserDefinedFunctionsIT.java | 127 +--
 .../expression/function/UDFExpression.java  |  20 +--
 2 files changed, 120 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/74228aee/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index f58f750..943119d 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -27,15 +27,12 @@ import static 
org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PAR
 import static org.apache.phoenix.util.TestUtil.JOIN_ITEM_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.LOCALHOST;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -53,6 +50,10 @@ import java.util.jar.Manifest;
 import javax.tools.JavaCompiler;
 import javax.tools.ToolProvider;
 
+import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -66,13 +67,15 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
+import org.junit.rules.TestName;
 
 public class UserDefinedFunctionsIT extends BaseOwnClusterIT {
-
 protected static final String TENANT_ID = "ZZTop";
 private static String url;
 private static PhoenixTestDriver driver;
@@ -190,10 +193,36 @@ public class UserDefinedFunctionsIT extends 
BaseOwnClusterIT {
 private static String GETY_CLASSNAME_PROGRAM = getProgram(GETY_CLASSNAME, 
GETY_EVALUATE_METHOD, "return PInteger.INSTANCE;");
 private static Properties EMPTY_PROPS = new Properties();
 
+@Rule
+public TestName name = new TestName();
 
 @Override
 @After
-public void cleanUpAfterTest() throws Exception {}
+public void cleanUpAfterTest() throws Exception {
+Connection conn = driver.connect(url, EMPTY_PROPS);
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery("list jars");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar1.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar2.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar3.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar4.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar5.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar6.jar'");
+

[3/4] phoenix git commit: PHOENIX-4231 Support restriction of remote UDF load sources

2018-03-14 Thread apurtell
PHOENIX-4231 Support restriction of remote UDF load sources

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/be634b57
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/be634b57
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/be634b57

Branch: refs/heads/4.x-HBase-1.2
Commit: be634b576589c3c2a523e792efec0c03398b5285
Parents: 8c830a7
Author: aertoria 
Authored: Tue Feb 27 15:02:07 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Mar 14 12:43:04 2018 -0700

--
 .../phoenix/end2end/UserDefinedFunctionsIT.java | 127 +--
 .../expression/function/UDFExpression.java  |  20 +--
 2 files changed, 120 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/be634b57/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index f58f750..943119d 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -27,15 +27,12 @@ import static 
org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PAR
 import static org.apache.phoenix.util.TestUtil.JOIN_ITEM_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.LOCALHOST;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -53,6 +50,10 @@ import java.util.jar.Manifest;
 import javax.tools.JavaCompiler;
 import javax.tools.ToolProvider;
 
+import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -66,13 +67,15 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
+import org.junit.rules.TestName;
 
 public class UserDefinedFunctionsIT extends BaseOwnClusterIT {
-
 protected static final String TENANT_ID = "ZZTop";
 private static String url;
 private static PhoenixTestDriver driver;
@@ -190,10 +193,36 @@ public class UserDefinedFunctionsIT extends 
BaseOwnClusterIT {
 private static String GETY_CLASSNAME_PROGRAM = getProgram(GETY_CLASSNAME, 
GETY_EVALUATE_METHOD, "return PInteger.INSTANCE;");
 private static Properties EMPTY_PROPS = new Properties();
 
+@Rule
+public TestName name = new TestName();
 
 @Override
 @After
-public void cleanUpAfterTest() throws Exception {}
+public void cleanUpAfterTest() throws Exception {
+Connection conn = driver.connect(url, EMPTY_PROPS);
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery("list jars");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar1.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar2.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar3.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar4.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar5.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar6.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar7.jar'");
+stmt.execute("delete jar '"+ 

[2/4] phoenix git commit: PHOENIX-4231 Support restriction of remote UDF load sources

2018-03-14 Thread apurtell
PHOENIX-4231 Support restriction of remote UDF load sources

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ae3618ff
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ae3618ff
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ae3618ff

Branch: refs/heads/4.x-HBase-1.3
Commit: ae3618ff88c36eb04734fad78ac64c8989fc470f
Parents: c115b6a
Author: aertoria 
Authored: Tue Feb 27 15:02:07 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Mar 14 12:42:55 2018 -0700

--
 .../phoenix/end2end/UserDefinedFunctionsIT.java | 127 +--
 .../expression/function/UDFExpression.java  |  20 +--
 2 files changed, 120 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae3618ff/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index f58f750..943119d 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -27,15 +27,12 @@ import static 
org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PAR
 import static org.apache.phoenix.util.TestUtil.JOIN_ITEM_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.LOCALHOST;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -53,6 +50,10 @@ import java.util.jar.Manifest;
 import javax.tools.JavaCompiler;
 import javax.tools.ToolProvider;
 
+import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -66,13 +67,15 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
+import org.junit.rules.TestName;
 
 public class UserDefinedFunctionsIT extends BaseOwnClusterIT {
-
 protected static final String TENANT_ID = "ZZTop";
 private static String url;
 private static PhoenixTestDriver driver;
@@ -190,10 +193,36 @@ public class UserDefinedFunctionsIT extends 
BaseOwnClusterIT {
 private static String GETY_CLASSNAME_PROGRAM = getProgram(GETY_CLASSNAME, 
GETY_EVALUATE_METHOD, "return PInteger.INSTANCE;");
 private static Properties EMPTY_PROPS = new Properties();
 
+@Rule
+public TestName name = new TestName();
 
 @Override
 @After
-public void cleanUpAfterTest() throws Exception {}
+public void cleanUpAfterTest() throws Exception {
+Connection conn = driver.connect(url, EMPTY_PROPS);
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery("list jars");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar1.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar2.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar3.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar4.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar5.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar6.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar7.jar'");
+stmt.execute("delete jar '"+ 

[4/4] phoenix git commit: PHOENIX-4231 Support restriction of remote UDF load sources

2018-03-14 Thread apurtell
PHOENIX-4231 Support restriction of remote UDF load sources

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ade93c9d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ade93c9d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ade93c9d

Branch: refs/heads/4.x-HBase-0.98
Commit: ade93c9d5ac6ecad2234d22da6fdbb1168c5d32a
Parents: 8f8209d
Author: aertoria 
Authored: Tue Feb 27 15:02:07 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Mar 14 12:43:09 2018 -0700

--
 .../phoenix/end2end/UserDefinedFunctionsIT.java | 127 +--
 .../expression/function/UDFExpression.java  |  20 +--
 2 files changed, 120 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ade93c9d/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index f58f750..943119d 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -27,15 +27,12 @@ import static 
org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PAR
 import static org.apache.phoenix.util.TestUtil.JOIN_ITEM_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.LOCALHOST;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -53,6 +50,10 @@ import java.util.jar.Manifest;
 import javax.tools.JavaCompiler;
 import javax.tools.ToolProvider;
 
+import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -66,13 +67,15 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
+import org.junit.rules.TestName;
 
 public class UserDefinedFunctionsIT extends BaseOwnClusterIT {
-
 protected static final String TENANT_ID = "ZZTop";
 private static String url;
 private static PhoenixTestDriver driver;
@@ -190,10 +193,36 @@ public class UserDefinedFunctionsIT extends 
BaseOwnClusterIT {
 private static String GETY_CLASSNAME_PROGRAM = getProgram(GETY_CLASSNAME, 
GETY_EVALUATE_METHOD, "return PInteger.INSTANCE;");
 private static Properties EMPTY_PROPS = new Properties();
 
+@Rule
+public TestName name = new TestName();
 
 @Override
 @After
-public void cleanUpAfterTest() throws Exception {}
+public void cleanUpAfterTest() throws Exception {
+Connection conn = driver.connect(url, EMPTY_PROPS);
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery("list jars");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar1.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar2.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar3.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar4.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar5.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar6.jar'");
+stmt.execute("delete jar '"+ 
util.getConfiguration().get(QueryServices.DYNAMIC_JARS_DIR_KEY)+"/"+"myjar7.jar'");
+stmt.execute("delete jar '"+ 

[3/4] phoenix git commit: PHOENIX-3997 UngroupedAggregateRegionObserver.commitBatchWithHTable() should not check the memstore size and wait for flush.

2017-07-24 Thread apurtell
PHOENIX-3997 UngroupedAggregateRegionObserver.commitBatchWithHTable() should 
not check the memstore size and wait for flush.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e9498bf4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e9498bf4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e9498bf4

Branch: refs/heads/4.x-HBase-1.1
Commit: e9498bf4704d438969e67557c7d45b3a76c65458
Parents: bd11d86
Author: Geoffrey Jacoby 
Authored: Thu Jul 13 13:34:54 2017 -0700
Committer: Andrew Purtell 
Committed: Mon Jul 24 15:42:20 2017 -0700

--
 .../UngroupedAggregateRegionObserver.java   | 127 ++-
 1 file changed, 67 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e9498bf4/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index a949058..a07b5d0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -200,24 +200,12 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 InterRegionServerIndexRpcControllerFactory.class, 
RpcControllerFactory.class);
 }
 
-private void commitBatch(Region region, List mutations, byte[] 
indexUUID, long blockingMemstoreSize,
-byte[] indexMaintainersPtr, byte[] txState, boolean useIndexProto) 
throws IOException {
+private void commitBatch(Region region, List mutations, long 
blockingMemstoreSize) throws IOException {
   if (mutations.isEmpty()) {
- return;
+  return;
   }
-  for (Mutation m : mutations) {
- if (indexMaintainersPtr != null) {
- m.setAttribute(useIndexProto ? PhoenixIndexCodec.INDEX_PROTO_MD : 
PhoenixIndexCodec.INDEX_MD, indexMaintainersPtr);
- }
- if (indexUUID != null) {
-m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
- }
- if (txState != null) {
- m.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
- }
-  }
-  
-  Mutation[] mutationArray = new Mutation[mutations.size()];
+
+Mutation[] mutationArray = new Mutation[mutations.size()];
   // When memstore size reaches blockingMemstoreSize we are waiting 3 
seconds for the
   // flush happen which decrease the memstore size and then writes allowed 
on the region.
   for (int i = 0; region.getMemstoreSize() > blockingMemstoreSize && i < 
30; i++) {
@@ -233,34 +221,26 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
   logger.debug("Committing bactch of " + mutations.size() + " mutations 
for " + region.getRegionInfo().getTable().getNameAsString());
   region.batchMutate(mutations.toArray(mutationArray), 
HConstants.NO_NONCE, HConstants.NO_NONCE);
 }
-
-private void commitBatchWithHTable(HTable table, Region region, 
List mutations, byte[] indexUUID,
-long blockingMemstoreSize, byte[] indexMaintainersPtr, byte[] 
txState, boolean useIndexProto) throws IOException {
-   if (mutations.isEmpty()) {
- return;
-   }
+
+private void setIndexAndTransactionProperties(List mutations, 
byte[] indexUUID, byte[] indexMaintainersPtr, byte[] txState, boolean 
useIndexProto) {
 for (Mutation m : mutations) {
-if (indexMaintainersPtr != null) {
-m.setAttribute(useIndexProto ? 
PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, 
indexMaintainersPtr);
-}
-if (txState != null) {
-m.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
-}
-if (indexUUID != null) {
-   m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
-}
-}
-// When memstore size reaches blockingMemstoreSize we are waiting 3 
seconds for the
-// flush happen which decrease the memstore size and then writes 
allowed on the region.
-for (int i = 0; region.getMemstoreSize() > blockingMemstoreSize && i < 
30; i++) {
-try {
-checkForRegionClosing();
-Thread.sleep(100);
-} catch (InterruptedException e) {
-Thread.currentThread().interrupt();
- 

[4/4] phoenix git commit: PHOENIX-3997 UngroupedAggregateRegionObserver.commitBatchWithHTable() should not check the memstore size and wait for flush.

2017-07-24 Thread apurtell
PHOENIX-3997 UngroupedAggregateRegionObserver.commitBatchWithHTable() should 
not check the memstore size and wait for flush.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/318dd230
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/318dd230
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/318dd230

Branch: refs/heads/4.x-HBase-0.98
Commit: 318dd23031382200d92781fb11913464c9d442a6
Parents: e18c393
Author: Geoffrey Jacoby 
Authored: Thu Jul 13 13:34:54 2017 -0700
Committer: Andrew Purtell 
Committed: Mon Jul 24 15:56:33 2017 -0700

--
 .../UngroupedAggregateRegionObserver.java   | 101 ++-
 1 file changed, 54 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/318dd230/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 70ed5e5..1571822 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -200,21 +200,9 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 InterRegionServerIndexRpcControllerFactory.class, 
RpcControllerFactory.class);
 }
 
-private void commitBatch(HRegion region, List mutations, byte[] 
indexUUID, long blockingMemstoreSize,
-byte[] indexMaintainersPtr, byte[] txState, boolean useIndexProto) 
throws IOException {
+private void commitBatch(HRegion region, List mutations, long 
blockingMemstoreSize) throws IOException {
   if (mutations.isEmpty()) {
- return;
-  }
-  for (Mutation m : mutations) {
- if (indexMaintainersPtr != null) {
- m.setAttribute(useIndexProto ? PhoenixIndexCodec.INDEX_PROTO_MD : 
PhoenixIndexCodec.INDEX_MD, indexMaintainersPtr);
- }
- if (indexUUID != null) {
-m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
- }
- if (txState != null) {
- m.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
- }
+  return;
   }
   Mutation[] mutationArray = new Mutation[mutations.size()];
   // When memstore size reaches blockingMemstoreSize we are waiting 3 
seconds for the
@@ -233,11 +221,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
   region.batchMutate(mutations.toArray(mutationArray), 
HConstants.NO_NONCE, HConstants.NO_NONCE);
 }
 
-private void commitBatchWithHTable(HTable table, HRegion region, 
List mutations, byte[] indexUUID,
-long blockingMemstoreSize, byte[] indexMaintainersPtr, byte[] 
txState, boolean useIndexProto) throws IOException {
-   if (mutations.isEmpty()) {
- return;
-   }
+private void setIndexAndTransactionProperties(List mutations, 
byte[] indexUUID, byte[] indexMaintainersPtr, byte[] txState, boolean 
useIndexProto) {
 for (Mutation m : mutations) {
 if (indexMaintainersPtr != null) {
 m.setAttribute(useIndexProto ? 
PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, 
indexMaintainersPtr);
@@ -246,19 +230,14 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 m.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
 }
 if (indexUUID != null) {
-   m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
+m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
 }
 }
-// When memstore size reaches blockingMemstoreSize we are waiting 3 
seconds for the
-// flush happen which decrease the memstore size and then writes 
allowed on the region.
-for (int i = 0; region.getMemstoreSize().get() > blockingMemstoreSize 
&& i < 30; i++) {
-try {
-checkForRegionClosing();
-Thread.sleep(100);
-} catch (InterruptedException e) {
-Thread.currentThread().interrupt();
-throw new IOException(e);
-}
+}
+
+private void commitBatchWithHTable(HTable table, List mutations) 
throws IOException {
+if (mutations.isEmpty()) {
+return;
 }
 logger.debug("Committing batch of " + 

[2/4] phoenix git commit: PHOENIX-3997 UngroupedAggregateRegionObserver.commitBatchWithHTable() should not check the memstore size and wait for flush.

2017-07-24 Thread apurtell
PHOENIX-3997 UngroupedAggregateRegionObserver.commitBatchWithHTable() should 
not check the memstore size and wait for flush.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/54c28d19
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/54c28d19
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/54c28d19

Branch: refs/heads/4.x-HBase-1.2
Commit: 54c28d19698b18d2e5d86d61e37a104391ff3392
Parents: 2913e10
Author: Geoffrey Jacoby 
Authored: Thu Jul 13 13:34:54 2017 -0700
Committer: Andrew Purtell 
Committed: Mon Jul 24 15:41:42 2017 -0700

--
 .../UngroupedAggregateRegionObserver.java   | 129 ++-
 1 file changed, 68 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/54c28d19/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index dc2ae3f..a07b5d0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -200,24 +200,12 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 InterRegionServerIndexRpcControllerFactory.class, 
RpcControllerFactory.class);
 }
 
-private void commitBatch(Region region, List mutations, byte[] 
indexUUID, long blockingMemstoreSize,
-byte[] indexMaintainersPtr, byte[] txState, boolean useIndexProto) 
throws IOException {
+private void commitBatch(Region region, List mutations, long 
blockingMemstoreSize) throws IOException {
   if (mutations.isEmpty()) {
- return;
+  return;
   }
-  for (Mutation m : mutations) {
- if (indexMaintainersPtr != null) {
- m.setAttribute(useIndexProto ? PhoenixIndexCodec.INDEX_PROTO_MD : 
PhoenixIndexCodec.INDEX_MD, indexMaintainersPtr);
- }
- if (indexUUID != null) {
-m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
- }
- if (txState != null) {
- m.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
- }
-  }
-  
-  Mutation[] mutationArray = new Mutation[mutations.size()];
+
+Mutation[] mutationArray = new Mutation[mutations.size()];
   // When memstore size reaches blockingMemstoreSize we are waiting 3 
seconds for the
   // flush happen which decrease the memstore size and then writes allowed 
on the region.
   for (int i = 0; region.getMemstoreSize() > blockingMemstoreSize && i < 
30; i++) {
@@ -233,34 +221,26 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
   logger.debug("Committing bactch of " + mutations.size() + " mutations 
for " + region.getRegionInfo().getTable().getNameAsString());
   region.batchMutate(mutations.toArray(mutationArray), 
HConstants.NO_NONCE, HConstants.NO_NONCE);
 }
-
-private void commitBatchWithHTable(HTable table, Region region, 
List mutations, byte[] indexUUID,
-long blockingMemstoreSize, byte[] indexMaintainersPtr, byte[] 
txState, boolean useIndexProto) throws IOException {
-   if (mutations.isEmpty()) {
- return;
-   }
+
+private void setIndexAndTransactionProperties(List mutations, 
byte[] indexUUID, byte[] indexMaintainersPtr, byte[] txState, boolean 
useIndexProto) {
 for (Mutation m : mutations) {
-if (indexMaintainersPtr != null) {
-m.setAttribute(useIndexProto ? 
PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, 
indexMaintainersPtr);
-}
-if (txState != null) {
-m.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
-}
-if (indexUUID != null) {
-   m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
-}
-}
-// When memstore size reaches blockingMemstoreSize we are waiting 3 
seconds for the
-// flush happen which decrease the memstore size and then writes 
allowed on the region.
-for (int i = 0; region.getMemstoreSize() > blockingMemstoreSize && i < 
30; i++) {
-try {
-checkForRegionClosing();
-Thread.sleep(100);
-} catch (InterruptedException e) {
-Thread.currentThread().interrupt();
- 

[1/4] phoenix git commit: PHOENIX-3997 UngroupedAggregateRegionObserver.commitBatchWithHTable() should not check the memstore size and wait for flush.

2017-07-24 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 e18c39341 -> 318dd2303
  refs/heads/4.x-HBase-1.1 bd11d86e3 -> e9498bf47
  refs/heads/4.x-HBase-1.2 2913e108f -> 54c28d196
  refs/heads/master adda7f10b -> a78811131


PHOENIX-3997 UngroupedAggregateRegionObserver.commitBatchWithHTable() should 
not check the memstore size and wait for flush.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a7881113
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a7881113
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a7881113

Branch: refs/heads/master
Commit: a788111311f9b260c4eddbe7e9d2359a312b8d30
Parents: adda7f1
Author: Geoffrey Jacoby 
Authored: Thu Jul 13 13:34:54 2017 -0700
Committer: Andrew Purtell 
Committed: Mon Jul 24 15:38:26 2017 -0700

--
 .../UngroupedAggregateRegionObserver.java   | 127 ++-
 1 file changed, 67 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a7881113/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index a949058..a07b5d0 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -200,24 +200,12 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 InterRegionServerIndexRpcControllerFactory.class, 
RpcControllerFactory.class);
 }
 
-private void commitBatch(Region region, List mutations, byte[] 
indexUUID, long blockingMemstoreSize,
-byte[] indexMaintainersPtr, byte[] txState, boolean useIndexProto) 
throws IOException {
+private void commitBatch(Region region, List mutations, long 
blockingMemstoreSize) throws IOException {
   if (mutations.isEmpty()) {
- return;
+  return;
   }
-  for (Mutation m : mutations) {
- if (indexMaintainersPtr != null) {
- m.setAttribute(useIndexProto ? PhoenixIndexCodec.INDEX_PROTO_MD : 
PhoenixIndexCodec.INDEX_MD, indexMaintainersPtr);
- }
- if (indexUUID != null) {
-m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
- }
- if (txState != null) {
- m.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
- }
-  }
-  
-  Mutation[] mutationArray = new Mutation[mutations.size()];
+
+Mutation[] mutationArray = new Mutation[mutations.size()];
   // When memstore size reaches blockingMemstoreSize we are waiting 3 
seconds for the
   // flush happen which decrease the memstore size and then writes allowed 
on the region.
   for (int i = 0; region.getMemstoreSize() > blockingMemstoreSize && i < 
30; i++) {
@@ -233,34 +221,26 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
   logger.debug("Committing bactch of " + mutations.size() + " mutations 
for " + region.getRegionInfo().getTable().getNameAsString());
   region.batchMutate(mutations.toArray(mutationArray), 
HConstants.NO_NONCE, HConstants.NO_NONCE);
 }
-
-private void commitBatchWithHTable(HTable table, Region region, 
List mutations, byte[] indexUUID,
-long blockingMemstoreSize, byte[] indexMaintainersPtr, byte[] 
txState, boolean useIndexProto) throws IOException {
-   if (mutations.isEmpty()) {
- return;
-   }
+
+private void setIndexAndTransactionProperties(List mutations, 
byte[] indexUUID, byte[] indexMaintainersPtr, byte[] txState, boolean 
useIndexProto) {
 for (Mutation m : mutations) {
-if (indexMaintainersPtr != null) {
-m.setAttribute(useIndexProto ? 
PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, 
indexMaintainersPtr);
-}
-if (txState != null) {
-m.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
-}
-if (indexUUID != null) {
-   m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
-}
-}
-// When memstore size reaches blockingMemstoreSize we are waiting 3 
seconds for the
-// flush happen which decrease the memstore size and then writes 
allowed on the region.
-for (int i = 0; region.getMemstoreSize() > blockingMemstoreSize && 

[3/4] phoenix git commit: Amend PHOENIX-4039 Increase default number of RPC retries for our index rebuild task

2017-07-24 Thread apurtell
Amend PHOENIX-4039 Increase default number of RPC retries for our index rebuild 
task

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bd11d86e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bd11d86e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bd11d86e

Branch: refs/heads/4.x-HBase-1.1
Commit: bd11d86e382e6853cb54b2b7fc2a330a0aaa22e0
Parents: ce71efc
Author: Vincent 
Authored: Thu Jul 20 14:13:24 2017 -0700
Committer: Andrew Purtell 
Committed: Mon Jul 24 11:45:02 2017 -0700

--
 .../java/org/apache/phoenix/query/QueryServicesOptions.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bd11d86e/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 8ffb0af..c6593da 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -183,10 +183,11 @@ public class QueryServicesOptions {
 public static final boolean DEFAULT_INDEX_FAILURE_THROW_EXCEPTION = true; 
 public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL = 
6; // 60 secs
 public static final long 
DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME = 1; // 1 ms
-public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = 3 * 60; 
// 30 mins
+// 30 min rpc timeout * 5 tries, with 2100ms total pause time between 
retries
+public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = (5 * 3 
* 60) + 2100;
 public static final long DEFAULT_INDEX_REBUILD_RPC_TIMEOUT = 3 * 60; 
// 30 mins
 public static final long DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT = 
3 * 60; // 30 mins
-public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 1; // 
1 retry at rpc level
+public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 5; // 
5 total tries at rpc level
 public static final int DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD 
= 3 * 60; // 30 mins
 
 /**



[1/4] phoenix git commit: Amend PHOENIX-4039 Increase default number of RPC retries for our index rebuild task

2017-07-24 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 771f766eb -> e18c39341
  refs/heads/4.x-HBase-1.1 ce71efc9f -> bd11d86e3
  refs/heads/4.x-HBase-1.2 6b6bb7751 -> 2913e108f
  refs/heads/master 87976eb6f -> adda7f10b


Amend PHOENIX-4039 Increase default number of RPC retries for our index rebuild 
task

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/adda7f10
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/adda7f10
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/adda7f10

Branch: refs/heads/master
Commit: adda7f10b648fa1d739a1ee739780fa1643faa81
Parents: 87976eb
Author: Vincent 
Authored: Thu Jul 20 14:13:24 2017 -0700
Committer: Andrew Purtell 
Committed: Mon Jul 24 11:44:46 2017 -0700

--
 .../java/org/apache/phoenix/query/QueryServicesOptions.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/adda7f10/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 8ffb0af..c6593da 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -183,10 +183,11 @@ public class QueryServicesOptions {
 public static final boolean DEFAULT_INDEX_FAILURE_THROW_EXCEPTION = true; 
 public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL = 
6; // 60 secs
 public static final long 
DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME = 1; // 1 ms
-public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = 3 * 60; 
// 30 mins
+// 30 min rpc timeout * 5 tries, with 2100ms total pause time between 
retries
+public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = (5 * 3 
* 60) + 2100;
 public static final long DEFAULT_INDEX_REBUILD_RPC_TIMEOUT = 3 * 60; 
// 30 mins
 public static final long DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT = 
3 * 60; // 30 mins
-public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 1; // 
1 retry at rpc level
+public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 5; // 
5 total tries at rpc level
 public static final int DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD 
= 3 * 60; // 30 mins
 
 /**



[4/4] phoenix git commit: Amend PHOENIX-4039 Increase default number of RPC retries for our index rebuild task

2017-07-24 Thread apurtell
Amend PHOENIX-4039 Increase default number of RPC retries for our index rebuild 
task

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e18c3934
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e18c3934
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e18c3934

Branch: refs/heads/4.x-HBase-0.98
Commit: e18c393412a40f38bead619a417120634190
Parents: 771f766
Author: Vincent 
Authored: Thu Jul 20 14:13:24 2017 -0700
Committer: Andrew Purtell 
Committed: Mon Jul 24 11:45:08 2017 -0700

--
 .../java/org/apache/phoenix/query/QueryServicesOptions.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e18c3934/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 80d01d7..55cbc91 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -173,10 +173,11 @@ public class QueryServicesOptions {
 public static final boolean DEFAULT_INDEX_FAILURE_THROW_EXCEPTION = true; 
 public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL = 
6; // 60 secs
 public static final long 
DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME = 1; // 1 ms
-public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = 3 * 60; 
// 30 mins
+// 30 min rpc timeout * 5 tries, with 2100ms total pause time between 
retries
+public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = (5 * 3 
* 60) + 2100;
 public static final long DEFAULT_INDEX_REBUILD_RPC_TIMEOUT = 3 * 60; 
// 30 mins
 public static final long DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT = 
3 * 60; // 30 mins
-public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 1; // 
1 retry at rpc level
+public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 5; // 
5 total tries at rpc level
 public static final int DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD 
= 3 * 60; // 30 mins
 
 /**



[2/4] phoenix git commit: Amend PHOENIX-4039 Increase default number of RPC retries for our index rebuild task

2017-07-24 Thread apurtell
Amend PHOENIX-4039 Increase default number of RPC retries for our index rebuild 
task

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2913e108
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2913e108
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2913e108

Branch: refs/heads/4.x-HBase-1.2
Commit: 2913e108f9d8371c8963d6235cddcea404883a82
Parents: 6b6bb77
Author: Vincent 
Authored: Thu Jul 20 14:13:24 2017 -0700
Committer: Andrew Purtell 
Committed: Mon Jul 24 11:44:55 2017 -0700

--
 .../java/org/apache/phoenix/query/QueryServicesOptions.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2913e108/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 8ffb0af..c6593da 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -183,10 +183,11 @@ public class QueryServicesOptions {
 public static final boolean DEFAULT_INDEX_FAILURE_THROW_EXCEPTION = true; 
 public static final long DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL = 
6; // 60 secs
 public static final long 
DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME = 1; // 1 ms
-public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = 3 * 60; 
// 30 mins
+// 30 min rpc timeout * 5 tries, with 2100ms total pause time between 
retries
+public static final long DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT = (5 * 3 
* 60) + 2100;
 public static final long DEFAULT_INDEX_REBUILD_RPC_TIMEOUT = 3 * 60; 
// 30 mins
 public static final long DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT = 
3 * 60; // 30 mins
-public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 1; // 
1 retry at rpc level
+public static final int DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER = 5; // 
5 total tries at rpc level
 public static final int DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD 
= 3 * 60; // 30 mins
 
 /**



[3/4] phoenix git commit: PHOENIX-3842 Turn off all BloomFilter for Phoenix tables (Lars Hofhansl)

2017-05-10 Thread apurtell
PHOENIX-3842 Turn off all BloomFilter for Phoenix tables (Lars Hofhansl)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6b464780
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6b464780
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6b464780

Branch: refs/heads/4.x-HBase-1.2
Commit: 6b46478051db53bbc3dec60e59d53f05fe70bce9
Parents: ae76def
Author: Andrew Purtell 
Authored: Wed May 10 17:45:58 2017 -0700
Committer: Andrew Purtell 
Committed: Wed May 10 17:46:31 2017 -0700

--
 .../apache/phoenix/end2end/CreateTableIT.java   | 22 
 .../query/ConnectionQueryServicesImpl.java  |  2 ++
 2 files changed, 24 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6b464780/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index 96ba71d..f10c6d9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -35,6 +35,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -114,6 +115,9 @@ public class CreateTableIT extends BaseClientManagedTimeIT {
 }
 HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
props).getAdmin();
 assertNotNull(admin.getTableDescriptor(Bytes.toBytes(tableName)));
+HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+assertEquals(BloomType.NONE, columnFamilies[0].getBloomFilterType());
+
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 10));
 try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
 conn.createStatement().execute(ddl);
@@ -384,6 +388,24 @@ public class CreateTableIT extends BaseClientManagedTimeIT 
{
assertEquals(1, columnFamilies[0].getTimeToLive());
 }
 
+@Test
+public void testCreateTableColumnFamilyHBaseAttribs8() throws Exception {
+String ddl = "create table IF NOT EXISTS TEST8 ("
++ " id char(1) NOT NULL,"
++ " col1 integer NOT NULL,"
++ " col2 bigint NOT NULL,"
++ " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
++ " ) BLOOMFILTER = 'ROW', SALT_BUCKETS = 4";
+long ts = nextTimestamp();
+Properties props = new Properties();
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts));
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute(ddl);
+HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
props).getAdmin();
+HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes("TEST8")).getColumnFamilies();
+assertEquals(BloomType.ROW, columnFamilies[0].getBloomFilterType());
+}
+
 
 /**
  * Test to ensure that NOT NULL constraint isn't added to a non primary 
key column.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6b464780/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index b402274..489ffb4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -120,6 +120,7 @@ import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.util.ByteStringer;
@@ -734,6 +735,7 @@ public class ConnectionQueryServicesImpl extends 

[1/4] phoenix git commit: PHOENIX-3842 Turn off all BloomFilter for Phoenix tables (Lars Hofhansl)

2017-05-10 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 ed30d1ff1 -> 4beb182db
  refs/heads/4.x-HBase-1.1 b5312b4bf -> 9838dcfc5
  refs/heads/4.x-HBase-1.2 ae76def9a -> 6b4647805
  refs/heads/master 37d0a4a03 -> f6fbb0dc9


PHOENIX-3842 Turn off all BloomFilter for Phoenix tables (Lars Hofhansl)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4beb182d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4beb182d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4beb182d

Branch: refs/heads/4.x-HBase-0.98
Commit: 4beb182db738e0770c0feea138c5460b43769212
Parents: ed30d1f
Author: Andrew Purtell 
Authored: Wed May 10 17:45:58 2017 -0700
Committer: Andrew Purtell 
Committed: Wed May 10 17:45:58 2017 -0700

--
 .../apache/phoenix/end2end/CreateTableIT.java   | 22 
 .../query/ConnectionQueryServicesImpl.java  |  2 ++
 2 files changed, 24 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4beb182d/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index 96ba71d..f10c6d9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -35,6 +35,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -114,6 +115,9 @@ public class CreateTableIT extends BaseClientManagedTimeIT {
 }
 HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
props).getAdmin();
 assertNotNull(admin.getTableDescriptor(Bytes.toBytes(tableName)));
+HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+assertEquals(BloomType.NONE, columnFamilies[0].getBloomFilterType());
+
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 10));
 try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
 conn.createStatement().execute(ddl);
@@ -384,6 +388,24 @@ public class CreateTableIT extends BaseClientManagedTimeIT 
{
assertEquals(1, columnFamilies[0].getTimeToLive());
 }
 
+@Test
+public void testCreateTableColumnFamilyHBaseAttribs8() throws Exception {
+String ddl = "create table IF NOT EXISTS TEST8 ("
++ " id char(1) NOT NULL,"
++ " col1 integer NOT NULL,"
++ " col2 bigint NOT NULL,"
++ " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
++ " ) BLOOMFILTER = 'ROW', SALT_BUCKETS = 4";
+long ts = nextTimestamp();
+Properties props = new Properties();
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts));
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute(ddl);
+HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
props).getAdmin();
+HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes("TEST8")).getColumnFamilies();
+assertEquals(BloomType.ROW, columnFamilies[0].getBloomFilterType());
+}
+
 
 /**
  * Test to ensure that NOT NULL constraint isn't added to a non primary 
key column.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4beb182d/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 7d65d5a..16db802 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -120,6 +120,7 @@ import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import 

[4/4] phoenix git commit: PHOENIX-3842 Turn off all BloomFilter for Phoenix tables (Lars Hofhansl)

2017-05-10 Thread apurtell
PHOENIX-3842 Turn off all BloomFilter for Phoenix tables (Lars Hofhansl)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f6fbb0dc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f6fbb0dc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f6fbb0dc

Branch: refs/heads/master
Commit: f6fbb0dc9ef308eb747993c92a6895c7d460d4b7
Parents: 37d0a4a
Author: Andrew Purtell 
Authored: Wed May 10 17:45:58 2017 -0700
Committer: Andrew Purtell 
Committed: Wed May 10 17:46:34 2017 -0700

--
 .../apache/phoenix/end2end/CreateTableIT.java   | 22 
 .../query/ConnectionQueryServicesImpl.java  |  2 ++
 2 files changed, 24 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f6fbb0dc/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index 96ba71d..f10c6d9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -35,6 +35,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -114,6 +115,9 @@ public class CreateTableIT extends BaseClientManagedTimeIT {
 }
 HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
props).getAdmin();
 assertNotNull(admin.getTableDescriptor(Bytes.toBytes(tableName)));
+HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+assertEquals(BloomType.NONE, columnFamilies[0].getBloomFilterType());
+
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 10));
 try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
 conn.createStatement().execute(ddl);
@@ -384,6 +388,24 @@ public class CreateTableIT extends BaseClientManagedTimeIT 
{
assertEquals(1, columnFamilies[0].getTimeToLive());
 }
 
+@Test
+public void testCreateTableColumnFamilyHBaseAttribs8() throws Exception {
+String ddl = "create table IF NOT EXISTS TEST8 ("
++ " id char(1) NOT NULL,"
++ " col1 integer NOT NULL,"
++ " col2 bigint NOT NULL,"
++ " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
++ " ) BLOOMFILTER = 'ROW', SALT_BUCKETS = 4";
+long ts = nextTimestamp();
+Properties props = new Properties();
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts));
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute(ddl);
+HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
props).getAdmin();
+HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes("TEST8")).getColumnFamilies();
+assertEquals(BloomType.ROW, columnFamilies[0].getBloomFilterType());
+}
+
 
 /**
  * Test to ensure that NOT NULL constraint isn't added to a non primary 
key column.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f6fbb0dc/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index b402274..489ffb4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -120,6 +120,7 @@ import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.util.ByteStringer;
@@ -734,6 +735,7 @@ public class ConnectionQueryServicesImpl extends 

[2/4] phoenix git commit: PHOENIX-3842 Turn off all BloomFilter for Phoenix tables (Lars Hofhansl)

2017-05-10 Thread apurtell
PHOENIX-3842 Turn off all BloomFilter for Phoenix tables (Lars Hofhansl)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9838dcfc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9838dcfc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9838dcfc

Branch: refs/heads/4.x-HBase-1.1
Commit: 9838dcfc53472a1491bff9fcfc9c071e901cd9bd
Parents: b5312b4
Author: Andrew Purtell 
Authored: Wed May 10 17:45:58 2017 -0700
Committer: Andrew Purtell 
Committed: Wed May 10 17:46:25 2017 -0700

--
 .../apache/phoenix/end2end/CreateTableIT.java   | 22 
 .../query/ConnectionQueryServicesImpl.java  |  2 ++
 2 files changed, 24 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9838dcfc/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index 96ba71d..f10c6d9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -35,6 +35,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -114,6 +115,9 @@ public class CreateTableIT extends BaseClientManagedTimeIT {
 }
 HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
props).getAdmin();
 assertNotNull(admin.getTableDescriptor(Bytes.toBytes(tableName)));
+HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes(tableName)).getColumnFamilies();
+assertEquals(BloomType.NONE, columnFamilies[0].getBloomFilterType());
+
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 10));
 try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
 conn.createStatement().execute(ddl);
@@ -384,6 +388,24 @@ public class CreateTableIT extends BaseClientManagedTimeIT 
{
assertEquals(1, columnFamilies[0].getTimeToLive());
 }
 
+@Test
+public void testCreateTableColumnFamilyHBaseAttribs8() throws Exception {
+String ddl = "create table IF NOT EXISTS TEST8 ("
++ " id char(1) NOT NULL,"
++ " col1 integer NOT NULL,"
++ " col2 bigint NOT NULL,"
++ " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
++ " ) BLOOMFILTER = 'ROW', SALT_BUCKETS = 4";
+long ts = nextTimestamp();
+Properties props = new Properties();
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts));
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute(ddl);
+HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
props).getAdmin();
+HColumnDescriptor[] columnFamilies = 
admin.getTableDescriptor(Bytes.toBytes("TEST8")).getColumnFamilies();
+assertEquals(BloomType.ROW, columnFamilies[0].getBloomFilterType());
+}
+
 
 /**
  * Test to ensure that NOT NULL constraint isn't added to a non primary 
key column.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9838dcfc/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 0ca5995..463819c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -120,6 +120,7 @@ import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.util.ByteStringer;
@@ -734,6 +735,7 @@ public class ConnectionQueryServicesImpl extends 

[2/4] phoenix git commit: PHOENIX-3818 Add client setting to disable server UPSERT SELECT work

2017-05-08 Thread apurtell
PHOENIX-3818 Add client setting to disable server UPSERT SELECT work

Adds phoenix.client.enable.server.upsert.select property that is true
(enabled) by default. This acts as a feature toggle for PHOENIX-3271.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ae76def9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ae76def9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ae76def9

Branch: refs/heads/4.x-HBase-1.2
Commit: ae76def9a3e6281a77189e2b0b5b2c971a9321d0
Parents: 54da09c
Author: Alex Araujo 
Authored: Mon May 1 20:27:18 2017 -0500
Committer: Andrew Purtell 
Committed: Mon May 8 17:34:56 2017 -0700

--
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  | 93 ++--
 .../apache/phoenix/compile/UpsertCompiler.java  | 14 ++-
 .../UngroupedAggregateRegionObserver.java   | 14 +--
 .../org/apache/phoenix/query/QueryServices.java |  3 +
 .../phoenix/query/QueryServicesOptions.java |  4 +-
 .../org/apache/phoenix/util/ExpressionUtil.java | 14 +++
 6 files changed, 97 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae76def9/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index 410f02c..b9e4fff 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -45,11 +45,12 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -67,14 +68,14 @@ public class PhoenixServerRpcIT extends 
BaseUniqueNamesOwnClusterIT {
Map<String, String> serverProps = 
Collections.singletonMap(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
 
TestPhoenixIndexRpcSchedulerFactory.class.getName());
 // use the standard rpc controller for client rpc, so that we can 
isolate server rpc and ensure they use the correct queue  
-   Map<String, String> clientProps = 
Collections.singletonMap(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, 
-   RpcControllerFactory.class.getName());  
+   Map<String, String> clientProps = 
Collections.singletonMap(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
+   RpcControllerFactory.class.getName());
 NUM_SLAVES_BASE = 2;
 setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
 }
 
-@AfterClass
-public static void cleanUpAfterTestSuite() throws Exception {
+@After
+public void cleanUpAfterTest() throws Exception {
 TestPhoenixIndexRpcSchedulerFactory.reset();
 }
 
@@ -91,26 +92,19 @@ public class PhoenixServerRpcIT extends 
BaseUniqueNamesOwnClusterIT {
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 Connection conn = driver.connect(getUrl(), props);
 try {
-// create the table 
-conn.createStatement().execute(
-"CREATE TABLE " + dataTableFullName + " (k VARCHAR NOT 
NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+// create the table
+createTable(conn, dataTableFullName);
 
-// create the index 
-conn.createStatement().execute(
-"CREATE INDEX " + indexName + " ON " + dataTableFullName + 
" (v1) INCLUDE (v2)");
+// create the index
+createIndex(conn, indexName);
 
 ensureTablesOnDifferentRegionServers(dataTableFullName, 
indexTableFullName);
 
-PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + 
dataTableFullName + " VALUES(?,?,?)");
-stmt.setString(1, "k1");
-stmt.setString(2, "v1");
-stmt.setString(3, "v2");
-stmt.execute();
-conn.commit();
+upsertRow(conn, dataTableFullName);
 
 // run select query that should use 

[4/4] phoenix git commit: PHOENIX-3818 Add client setting to disable server UPSERT SELECT work

2017-05-08 Thread apurtell
PHOENIX-3818 Add client setting to disable server UPSERT SELECT work

Adds phoenix.client.enable.server.upsert.select property that is true
(enabled) by default. This acts as a feature toggle for PHOENIX-3271.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ed30d1ff
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ed30d1ff
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ed30d1ff

Branch: refs/heads/4.x-HBase-0.98
Commit: ed30d1ff151eecbd2161d197c3cf7159f6707e6e
Parents: 6befc6c
Author: Alex Araujo 
Authored: Mon May 1 20:27:18 2017 -0500
Committer: Andrew Purtell 
Committed: Mon May 8 17:35:03 2017 -0700

--
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  | 93 ++--
 .../apache/phoenix/compile/UpsertCompiler.java  | 14 ++-
 .../UngroupedAggregateRegionObserver.java   | 14 +--
 .../org/apache/phoenix/query/QueryServices.java |  3 +
 .../phoenix/query/QueryServicesOptions.java |  4 +-
 .../org/apache/phoenix/util/ExpressionUtil.java | 14 +++
 6 files changed, 97 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed30d1ff/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index 8f95b32..6782c3e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -44,11 +44,12 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -66,14 +67,14 @@ public class PhoenixServerRpcIT extends 
BaseUniqueNamesOwnClusterIT {
Map<String, String> serverProps = 
Collections.singletonMap(HRegionServer.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
 
TestPhoenixIndexRpcSchedulerFactory.class.getName());
 // use the standard rpc controller for client rpc, so that we can 
isolate server rpc and ensure they use the correct queue  
-   Map<String, String> clientProps = 
Collections.singletonMap(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, 
-   RpcControllerFactory.class.getName());  
+   Map<String, String> clientProps = 
Collections.singletonMap(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
+   RpcControllerFactory.class.getName());
 NUM_SLAVES_BASE = 2;
 setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
 }
 
-@AfterClass
-public static void cleanUpAfterTestSuite() throws Exception {
+@After
+public void cleanUpAfterTest() throws Exception {
 TestPhoenixIndexRpcSchedulerFactory.reset();
 }
 
@@ -90,26 +91,19 @@ public class PhoenixServerRpcIT extends 
BaseUniqueNamesOwnClusterIT {
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 Connection conn = driver.connect(getUrl(), props);
 try {
-// create the table 
-conn.createStatement().execute(
-"CREATE TABLE " + dataTableFullName + " (k VARCHAR NOT 
NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+// create the table
+createTable(conn, dataTableFullName);
 
-// create the index 
-conn.createStatement().execute(
-"CREATE INDEX " + indexName + " ON " + dataTableFullName + 
" (v1) INCLUDE (v2)");
+// create the index
+createIndex(conn, indexName);
 
 ensureTablesOnDifferentRegionServers(dataTableFullName, 
indexTableFullName);
 
-PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + 
dataTableFullName + " VALUES(?,?,?)");
-stmt.setString(1, "k1");
-stmt.setString(2, "v1");
-stmt.setString(3, "v2");
-stmt.execute();
-conn.commit();
+upsertRow(conn, dataTableFullName);
 
 // run select query that should use the index
   

[3/4] phoenix git commit: PHOENIX-3818 Add client setting to disable server UPSERT SELECT work

2017-05-08 Thread apurtell
PHOENIX-3818 Add client setting to disable server UPSERT SELECT work

Adds phoenix.client.enable.server.upsert.select property that is true
(enabled) by default. This acts as a feature toggle for PHOENIX-3271.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b5312b4b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b5312b4b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b5312b4b

Branch: refs/heads/4.x-HBase-1.1
Commit: b5312b4bf718baf3a0025955793b4fdcb38e0774
Parents: 97ceda3
Author: Alex Araujo 
Authored: Mon May 1 20:27:18 2017 -0500
Committer: Andrew Purtell 
Committed: Mon May 8 17:34:59 2017 -0700

--
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  | 93 ++--
 .../apache/phoenix/compile/UpsertCompiler.java  | 14 ++-
 .../UngroupedAggregateRegionObserver.java   | 14 +--
 .../org/apache/phoenix/query/QueryServices.java |  3 +
 .../phoenix/query/QueryServicesOptions.java |  4 +-
 .../org/apache/phoenix/util/ExpressionUtil.java | 14 +++
 6 files changed, 97 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b5312b4b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
index 410f02c..b9e4fff 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/PhoenixServerRpcIT.java
@@ -45,11 +45,12 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -67,14 +68,14 @@ public class PhoenixServerRpcIT extends 
BaseUniqueNamesOwnClusterIT {
Map<String, String> serverProps = 
Collections.singletonMap(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
 
TestPhoenixIndexRpcSchedulerFactory.class.getName());
 // use the standard rpc controller for client rpc, so that we can 
isolate server rpc and ensure they use the correct queue  
-   Map<String, String> clientProps = 
Collections.singletonMap(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, 
-   RpcControllerFactory.class.getName());  
+   Map<String, String> clientProps = 
Collections.singletonMap(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
+   RpcControllerFactory.class.getName());
 NUM_SLAVES_BASE = 2;
 setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), 
new ReadOnlyProps(clientProps.entrySet().iterator()));
 }
 
-@AfterClass
-public static void cleanUpAfterTestSuite() throws Exception {
+@After
+public void cleanUpAfterTest() throws Exception {
 TestPhoenixIndexRpcSchedulerFactory.reset();
 }
 
@@ -91,26 +92,19 @@ public class PhoenixServerRpcIT extends 
BaseUniqueNamesOwnClusterIT {
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 Connection conn = driver.connect(getUrl(), props);
 try {
-// create the table 
-conn.createStatement().execute(
-"CREATE TABLE " + dataTableFullName + " (k VARCHAR NOT 
NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
+// create the table
+createTable(conn, dataTableFullName);
 
-// create the index 
-conn.createStatement().execute(
-"CREATE INDEX " + indexName + " ON " + dataTableFullName + 
" (v1) INCLUDE (v2)");
+// create the index
+createIndex(conn, indexName);
 
 ensureTablesOnDifferentRegionServers(dataTableFullName, 
indexTableFullName);
 
-PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + 
dataTableFullName + " VALUES(?,?,?)");
-stmt.setString(1, "k1");
-stmt.setString(2, "v1");
-stmt.setString(3, "v2");
-stmt.execute();
-conn.commit();
+upsertRow(conn, dataTableFullName);
 
 // run select query that should use 

[phoenix] Git Push Summary

2017-04-25 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 [created] 92b951e53


phoenix git commit: PHOENIX-3603 Fix compilation errors against hbase 1.3

2017-04-25 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/master 92b951e53 -> 5b0990144


PHOENIX-3603 Fix compilation errors against hbase 1.3


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5b099014
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5b099014
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5b099014

Branch: refs/heads/master
Commit: 5b099014446865c12779f3882fd8b407496717ea
Parents: 92b951e
Author: Zach York 
Authored: Wed Jan 25 20:42:08 2017 -0800
Committer: Andrew Purtell 
Committed: Tue Apr 25 15:14:43 2017 -0700

--
 phoenix-assembly/pom.xml|  2 +-
 phoenix-client/pom.xml  |  2 +-
 phoenix-core/pom.xml|  2 +-
 ...ReplayWithIndexWritesAndCompressedWALIT.java |  2 +-
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java   | 10 +
 .../apache/phoenix/execute/DelegateHTable.java  | 22 +++-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |  2 +-
 .../recovery/TestPerRegionIndexWriteCache.java  |  2 +-
 phoenix-flume/pom.xml   |  2 +-
 phoenix-hive/pom.xml|  2 +-
 phoenix-kafka/pom.xml   |  2 +-
 phoenix-pherf/pom.xml   |  2 +-
 phoenix-pig/pom.xml |  2 +-
 phoenix-queryserver-client/pom.xml  |  2 +-
 phoenix-queryserver/pom.xml |  2 +-
 phoenix-server/pom.xml  |  2 +-
 phoenix-spark/pom.xml   |  2 +-
 phoenix-tracing-webapp/pom.xml  |  2 +-
 pom.xml |  4 ++--
 19 files changed, 49 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index a6f6f64..829ce7b 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.11.0-HBase-1.2-SNAPSHOT
+4.11.0-HBase-1.3-SNAPSHOT
   
   phoenix-assembly
   Phoenix Assembly

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-client/pom.xml
--
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index fb0520c..f436345 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.11.0-HBase-1.2-SNAPSHOT
+4.11.0-HBase-1.3-SNAPSHOT
   
   phoenix-client
   Phoenix Client

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 9d6e0f4..58162b6 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   
 org.apache.phoenix
 phoenix
-4.11.0-HBase-1.2-SNAPSHOT
+4.11.0-HBase-1.3-SNAPSHOT
   
   phoenix-core
   Phoenix Core

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
 
b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 0b48a1a..a55fe7e 100644
--- 
a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -264,7 +264,7 @@ public class WALReplayWithIndexWritesAndCompressedWALIT {
* @throws IOException
*/
   private WAL createWAL(final Configuration c, WALFactory walFactory) throws 
IOException {
-WAL wal = walFactory.getWAL(new byte[]{});
+WAL wal = walFactory.getWAL(new byte[]{}, null);
 
 // Set down maximum recovery so we dfsclient doesn't linger retrying 
something
 // long gone.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5b099014/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
index 4fdddf5..7712cc6 100644
--- 

[3/3] phoenix git commit: PHOENIX-3801 Demote extremely verbose logs in ParallelWriterIndexCommitter from DEBUG to TRACE

2017-04-20 Thread apurtell
PHOENIX-3801 Demote extremely verbose logs in ParallelWriterIndexCommitter from 
DEBUG to TRACE


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/679ff21b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/679ff21b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/679ff21b

Branch: refs/heads/master
Commit: 679ff21b78968a010db03c9428e60e7e00acb86e
Parents: 5bd7f79
Author: Andrew Purtell 
Authored: Wed Apr 19 18:43:51 2017 -0700
Committer: Andrew Purtell 
Committed: Thu Apr 20 18:20:08 2017 -0700

--
 .../hbase/index/write/ParallelWriterIndexCommitter.java  | 8 
 .../write/recovery/TrackingParallelWriterIndexCommitter.java | 8 
 2 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/679ff21b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index 1549d26..7510c5b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -143,8 +143,8 @@ public class ParallelWriterIndexCommitter implements 
IndexCommitter {
 // early exit, if that's the case
 throwFailureIfDone();
 
-if (LOG.isDebugEnabled()) {
-LOG.debug("Writing index update:" + mutations + " to 
table: " + tableReference);
+if (LOG.isTraceEnabled()) {
+LOG.trace("Writing index update:" + mutations + " to 
table: " + tableReference);
 }
 HTableInterface table = null;
 try {
@@ -158,8 +158,8 @@ public class ParallelWriterIndexCommitter implements 
IndexCommitter {
 return null;
 } catch (IOException ignord) {
 // when it's failed we fall back to the 
standard & slow way
-if (LOG.isDebugEnabled()) {
-LOG.debug("indexRegion.batchMutate failed 
and fall back to HTable.batch(). Got error="
+if (LOG.isTraceEnabled()) {
+LOG.trace("indexRegion.batchMutate failed 
and fall back to HTable.batch(). Got error="
 + ignord);
 }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/679ff21b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
index 4f1a076..074d0b9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
@@ -159,15 +159,15 @@ public class TrackingParallelWriterIndexCommitter 
implements IndexCommitter {
 return Boolean.TRUE;
 } catch (IOException ignord) {
 // when it's failed we fall back to the 
standard & slow way
-if (LOG.isDebugEnabled()) {
-LOG.debug("indexRegion.batchMutate failed 
and fall back to HTable.batch(). Got error="
+if (LOG.isTraceEnabled()) {
+LOG.trace("indexRegion.batchMutate failed 
and fall back to HTable.batch(). Got error="
 + ignord);
 }
 }
 }
 
-if (LOG.isDebugEnabled()) {
-LOG.debug("Writing index update:" + mutations + " 
to table: " + tableReference);
+if (LOG.isTraceEnabled()) {
+LOG.trace("Writing index update:" + mutations + " 
to table: " + tableReference);
  

[2/3] phoenix git commit: PHOENIX-3801 Demote extremely verbose logs in ParallelWriterIndexCommitter from DEBUG to TRACE

2017-04-20 Thread apurtell
PHOENIX-3801 Demote extremely verbose logs in ParallelWriterIndexCommitter from 
DEBUG to TRACE


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/785c4680
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/785c4680
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/785c4680

Branch: refs/heads/4.x-HBase-1.1
Commit: 785c4680ebc389ae51bc0852536237bd94de1d6c
Parents: e3ea3b1
Author: Andrew Purtell 
Authored: Wed Apr 19 18:43:51 2017 -0700
Committer: Andrew Purtell 
Committed: Thu Apr 20 18:17:43 2017 -0700

--
 .../hbase/index/write/ParallelWriterIndexCommitter.java  | 8 
 .../write/recovery/TrackingParallelWriterIndexCommitter.java | 8 
 2 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/785c4680/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index 1ab7338..5823bd9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -143,8 +143,8 @@ public class ParallelWriterIndexCommitter implements 
IndexCommitter {
 // early exit, if that's the case
 throwFailureIfDone();
 
-if (LOG.isDebugEnabled()) {
-LOG.debug("Writing index update:" + mutations + " to 
table: " + tableReference);
+if (LOG.isTraceEnabled()) {
+LOG.trace("Writing index update:" + mutations + " to 
table: " + tableReference);
 }
 HTableInterface table = null;
 try {
@@ -155,8 +155,8 @@ public class ParallelWriterIndexCommitter implements 
IndexCommitter {
return null;
} catch (IOException ignord) {
// when it's failed we fall back to the 
standard & slow way
-   if (LOG.isDebugEnabled()) {
-   LOG.debug("indexRegion.batchMutate 
failed and fall back to HTable.batch(). Got error="
+   if (LOG.isTraceEnabled()) {
+   LOG.trace("indexRegion.batchMutate 
failed and fall back to HTable.batch(). Got error="
+ ignord);
}
}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/785c4680/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
index d244d66..ed12d2f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
@@ -157,15 +157,15 @@ public class TrackingParallelWriterIndexCommitter 
implements IndexCommitter {
 return Boolean.TRUE;
 } catch (IOException ignord) {
 // when it's failed we fall back to the 
standard & slow way
-if (LOG.isDebugEnabled()) {
-LOG.debug("indexRegion.batchMutate failed 
and fall back to HTable.batch(). Got error="
+if (LOG.isTraceEnabled()) {
+LOG.trace("indexRegion.batchMutate failed 
and fall back to HTable.batch(). Got error="
 + ignord);
 }
 }
 }
 
-if (LOG.isDebugEnabled()) {
-LOG.debug("Writing index update:" + mutations + " 
to table: " + tableReference);
+if (LOG.isTraceEnabled()) {
+LOG.trace("Writing index update:" + mutations + " 
to table: " 

[1/3] phoenix git commit: PHOENIX-3801 Demote extremely verbose logs in ParallelWriterIndexCommitter from DEBUG to TRACE

2017-04-20 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 d18abb01f -> 452867b2c
  refs/heads/4.x-HBase-1.1 e3ea3b17a -> 785c4680e
  refs/heads/master 5bd7f79b5 -> 679ff21b7


PHOENIX-3801 Demote extremely verbose logs in ParallelWriterIndexCommitter from 
DEBUG to TRACE


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/452867b2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/452867b2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/452867b2

Branch: refs/heads/4.x-HBase-0.98
Commit: 452867b2c495ea975875513fc9bafe6f928deb07
Parents: d18abb0
Author: Andrew Purtell 
Authored: Wed Apr 19 18:43:51 2017 -0700
Committer: Andrew Purtell 
Committed: Thu Apr 20 18:17:11 2017 -0700

--
 .../hbase/index/write/ParallelWriterIndexCommitter.java  | 8 
 .../write/recovery/TrackingParallelWriterIndexCommitter.java | 8 
 2 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/452867b2/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index 7ef58a4..b912772 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -146,8 +146,8 @@ public class ParallelWriterIndexCommitter implements 
IndexCommitter {
 // early exit, if that's the case
 throwFailureIfDone();
 
-if (LOG.isDebugEnabled()) {
-LOG.debug("Writing index update:" + mutations + " to 
table: " + tableReference);
+if (LOG.isTraceEnabled()) {
+LOG.trace("Writing index update:" + mutations + " to 
table: " + tableReference);
 }
 HTableInterface table = null;
 try {
@@ -158,8 +158,8 @@ public class ParallelWriterIndexCommitter implements 
IndexCommitter {
 return null;
 } catch (IOException ignord) {
 // when it's failed we fall back to the 
standard & slow way
-if (LOG.isDebugEnabled()) {
-LOG.debug("indexRegion.batchMutate failed 
and fall back to HTable.batch(). Got error="
+if (LOG.isTraceEnabled()) {
+LOG.trace("indexRegion.batchMutate failed 
and fall back to HTable.batch(). Got error="
 + ignord);
 }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/452867b2/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
index 8aa3b78..3a3f32d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
@@ -149,8 +149,8 @@ public class TrackingParallelWriterIndexCommitter 
implements IndexCommitter {
 try {
 // this may have been queued, but there was an 
abort/stop so we try to early exit
 throwFailureIfDone();
-if (LOG.isDebugEnabled()) {
-LOG.debug("Writing index update:" + mutations + " 
to table: " + tableReference);
+if (LOG.isTraceEnabled()) {
+LOG.trace("Writing index update:" + mutations + " 
to table: " + tableReference);
 }
 if (allowLocalUpdates && env!=null && 
tableReference.getTableName().equals(
 env.getRegion().getTableDesc().getNameAsString())) 
{
@@ -160,8 +160,8 @@ public class TrackingParallelWriterIndexCommitter 
implements IndexCommitter {
 return Boolean.TRUE;

phoenix git commit: PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang) [Forced Update!]

2017-02-13 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 222388b03 -> 07df91700 (forced update)


PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/07df9170
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/07df9170
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/07df9170

Branch: refs/heads/4.x-HBase-1.1
Commit: 07df9170080adf715612fe8739d6011f51ae8cb6
Parents: dbb0c1e
Author: Andrew Purtell 
Authored: Mon Feb 13 15:24:01 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:51:35 2017 -0800

--
 .../apache/phoenix/mapreduce/AbstractBulkLoadTool.java  |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java   |  2 +-
 .../org/apache/phoenix/mapreduce/index/IndexTool.java   | 12 
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/07df9170/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index b32f9c6..f717647 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -331,7 +331,7 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 LOG.info("Loading HFiles from {}", outputPath);
 completebulkload(conf,outputPath,tablesToBeLoaded);
 LOG.info("Removing output directory {}", outputPath);
-if(!FileSystem.get(conf).delete(outputPath, true)) {
+if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
 LOG.error("Failed to delete the output directory {}", 
outputPath);
 }
 return 0;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/07df9170/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 35a2bd8..da78fd5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -455,8 +455,8 @@ public class MultiHfileOutputFormat extends 
FileOutputFormat

[3/4] phoenix git commit: PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)

2017-02-13 Thread apurtell
PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f48aa81a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f48aa81a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f48aa81a

Branch: refs/heads/4.x-HBase-0.98
Commit: f48aa81a02f5e8830dc821d23618f579453ab733
Parents: 234e427
Author: Andrew Purtell 
Authored: Mon Feb 13 15:24:01 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:25:37 2017 -0800

--
 .../apache/phoenix/mapreduce/AbstractBulkLoadTool.java  |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java   |  2 +-
 .../org/apache/phoenix/mapreduce/index/IndexTool.java   | 12 
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f48aa81a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index f7b7d22..9cb54ef 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -328,7 +328,7 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 LOG.info("Loading HFiles from {}", outputPath);
 completebulkload(conf,outputPath,tablesToBeLoaded);
 LOG.info("Removing output directory {}", outputPath);
-if(!FileSystem.get(conf).delete(outputPath, true)) {
+if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
 LOG.error("Failed to delete the output directory {}", 
outputPath);
 }
 return 0;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f48aa81a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index f48a690..9c19a52 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -454,8 +454,8 @@ public class MultiHfileOutputFormat extends 
FileOutputFormat

[1/4] phoenix git commit: Amend PHOENIX-3611 ConnectionQueryService should expire LRU entries

2017-02-13 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 234e427b3 -> f48aa81a0
  refs/heads/4.x-HBase-1.1 dbb0c1ea0 -> 222388b03
  refs/heads/master 8f2d0fbc5 -> 7567fcd6d


Amend PHOENIX-3611 ConnectionQueryService should expire LRU entries

Signed-off-by: Andrew Purtell 

Do not enforce a maximum size on the client connection cache.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/beea861b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/beea861b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/beea861b

Branch: refs/heads/4.x-HBase-1.1
Commit: beea861be4dab1f2bcb61e7c97f1ac8de742af74
Parents: dbb0c1e
Author: gjacoby 
Authored: Wed Jan 25 13:49:26 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:22:59 2017 -0800

--
 .../src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java  | 3 ---
 .../src/main/java/org/apache/phoenix/query/QueryServices.java | 1 -
 .../main/java/org/apache/phoenix/query/QueryServicesOptions.java  | 1 -
 3 files changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/beea861b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index b2acacf..67ac9c9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -149,8 +149,6 @@ public final class PhoenixDriver extends 
PhoenixEmbeddedDriver {
 
 private Cache 
initializeConnectionCache() {
 Configuration config = 
HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
-int maxCacheSize = 
config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_SIZE,
-QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE);
 int maxCacheDuration = 
config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS,
 QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION);
 RemovalListener 
cacheRemovalListener =
@@ -170,7 +168,6 @@ public final class PhoenixDriver extends 
PhoenixEmbeddedDriver {
 }
 };
 return CacheBuilder.newBuilder()
-.maximumSize(maxCacheSize)
 .expireAfterAccess(maxCacheDuration, TimeUnit.MILLISECONDS)
 .removalListener(cacheRemovalListener)
 .build();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/beea861b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 2035de8..0307e4c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -237,7 +237,6 @@ public interface QueryServices extends SQLCloseable {
 public static final String CLIENT_CACHE_ENCODING = 
"phoenix.table.client.cache.encoding";
 public static final String AUTO_UPGRADE_ENABLED = 
"phoenix.autoupgrade.enabled";
 
-public static final String CLIENT_CONNECTION_CACHE_MAX_SIZE = 
"phoenix.client.connection.cache.max.size";
 public static final String 
CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS =
 "phoenix.client.connection.max.duration";
 /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/beea861b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index de0796f..39a7d7e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -265,7 +265,6 @@ public class QueryServicesOptions {
 
 public static final String DEFAULT_CLIENT_CACHE_ENCODING = 
PTableRefFactory.Encoding.OBJECT.toString();
 public static final boolean DEFAULT_AUTO_UPGRADE_ENABLED = true;
-public static final int DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE = 100;
 public static final int DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION = 
8640;
 
 

[4/4] phoenix git commit: PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)

2017-02-13 Thread apurtell
PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7567fcd6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7567fcd6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7567fcd6

Branch: refs/heads/master
Commit: 7567fcd6d569a2ece7556c4e3a966a1baf34c3a5
Parents: 8f2d0fb
Author: Andrew Purtell 
Authored: Mon Feb 13 15:24:01 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:26:17 2017 -0800

--
 .../apache/phoenix/mapreduce/AbstractBulkLoadTool.java  |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java   |  2 +-
 .../org/apache/phoenix/mapreduce/index/IndexTool.java   | 12 
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7567fcd6/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index b32f9c6..f717647 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -331,7 +331,7 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 LOG.info("Loading HFiles from {}", outputPath);
 completebulkload(conf,outputPath,tablesToBeLoaded);
 LOG.info("Removing output directory {}", outputPath);
-if(!FileSystem.get(conf).delete(outputPath, true)) {
+if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
 LOG.error("Failed to delete the output directory {}", 
outputPath);
 }
 return 0;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7567fcd6/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 35a2bd8..da78fd5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -455,8 +455,8 @@ public class MultiHfileOutputFormat extends 
FileOutputFormat

[2/4] phoenix git commit: PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)

2017-02-13 Thread apurtell
PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/222388b0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/222388b0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/222388b0

Branch: refs/heads/4.x-HBase-1.1
Commit: 222388b03415caad37d858d1cd91fe79be571787
Parents: beea861
Author: Andrew Purtell 
Authored: Mon Feb 13 15:24:01 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:25:30 2017 -0800

--
 .../apache/phoenix/mapreduce/AbstractBulkLoadTool.java  |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java   |  2 +-
 .../org/apache/phoenix/mapreduce/index/IndexTool.java   | 12 
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/222388b0/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index b32f9c6..f717647 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -331,7 +331,7 @@ public abstract class AbstractBulkLoadTool extends 
Configured implements Tool {
 LOG.info("Loading HFiles from {}", outputPath);
 completebulkload(conf,outputPath,tablesToBeLoaded);
 LOG.info("Removing output directory {}", outputPath);
-if(!FileSystem.get(conf).delete(outputPath, true)) {
+if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
 LOG.error("Failed to delete the output directory {}", 
outputPath);
 }
 return 0;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/222388b0/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 35a2bd8..da78fd5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -455,8 +455,8 @@ public class MultiHfileOutputFormat extends 
FileOutputFormat

phoenix git commit: Amend PHOENIX-3611 ConnectionQueryService should expire LRU entries

2017-01-25 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 d971192c5 -> cd8f0535b


Amend PHOENIX-3611 ConnectionQueryService should expire LRU entries

Signed-off-by: Andrew Purtell 

Do not enforce a maximum size on the client connection cache.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cd8f0535
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cd8f0535
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cd8f0535

Branch: refs/heads/4.x-HBase-0.98
Commit: cd8f0535b6e52635a375a72f800a36ab5a0e292b
Parents: d971192
Author: gjacoby 
Authored: Wed Jan 25 13:49:26 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Jan 25 19:10:01 2017 -0800

--
 .../src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java  | 3 ---
 .../src/main/java/org/apache/phoenix/query/QueryServices.java | 1 -
 .../main/java/org/apache/phoenix/query/QueryServicesOptions.java  | 1 -
 3 files changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd8f0535/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index ba06ed9..f90e5ec 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -149,8 +149,6 @@ public final class PhoenixDriver extends 
PhoenixEmbeddedDriver {
 
 private Cache 
initializeConnectionCache() {
 Configuration config = 
HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
-int maxCacheSize = 
config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_SIZE,
-QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE);
 int maxCacheDuration = 
config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS,
 QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION);
 RemovalListener 
cacheRemovalListener =
@@ -170,7 +168,6 @@ public final class PhoenixDriver extends 
PhoenixEmbeddedDriver {
 }
 };
 return CacheBuilder.newBuilder()
-.maximumSize(maxCacheSize)
 .expireAfterAccess(maxCacheDuration, TimeUnit.MILLISECONDS)
 .removalListener(cacheRemovalListener)
 .build();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd8f0535/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index e77e01f..dc949ea 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -233,7 +233,6 @@ public interface QueryServices extends SQLCloseable {
 public static final String CLIENT_CACHE_ENCODING = 
"phoenix.table.client.cache.encoding";
 public static final String AUTO_UPGRADE_ENABLED = 
"phoenix.autoupgrade.enabled";
 
-public static final String CLIENT_CONNECTION_CACHE_MAX_SIZE = 
"phoenix.client.connection.cache.max.size";
 public static final String 
CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS =
 "phoenix.client.connection.max.duration";
 /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd8f0535/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 13fb9ea..a15009a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -259,7 +259,6 @@ public class QueryServicesOptions {
 
 public static final String DEFAULT_CLIENT_CACHE_ENCODING = 
PTableRefFactory.Encoding.OBJECT.toString();
 public static final boolean DEFAULT_AUTO_UPGRADE_ENABLED = true;
-public static final int DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE = 100;
 public static final int DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION = 
8640;
 
 @SuppressWarnings("serial")



[3/3] phoenix git commit: PHOENIX-3611 Cache for client connections will expire (and close) entries in LRU fashion.

2017-01-20 Thread apurtell
PHOENIX-3611 Cache for client connections will expire (and close) entries in 
LRU fashion.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/badb9b40
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/badb9b40
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/badb9b40

Branch: refs/heads/4.x-HBase-0.98
Commit: badb9b40b67e1dfc6b1bba1b368aa0ea461773f7
Parents: 2fd9b08
Author: Geoffrey 
Authored: Thu Jan 19 16:08:20 2017 -0800
Committer: Andrew Purtell 
Committed: Fri Jan 20 16:18:02 2017 -0800

--
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  | 99 +---
 .../org/apache/phoenix/query/QueryServices.java |  5 +-
 .../phoenix/query/QueryServicesOptions.java |  2 +
 3 files changed, 70 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/badb9b40/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index 1fb827c..ba06ed9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -23,20 +23,13 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Properties;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
+import java.util.concurrent.*;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.annotation.concurrent.GuardedBy;
 
+import com.google.common.cache.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -50,7 +43,6 @@ import org.apache.phoenix.query.QueryServicesOptions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 
@@ -147,13 +139,43 @@ public final class PhoenixDriver extends 
PhoenixEmbeddedDriver {
 }
 
 // One entry per cluster here
-private final ConcurrentMap 
connectionQueryServicesMap = new 
ConcurrentHashMap(3);
+private final Cache 
connectionQueryServicesCache =
+initializeConnectionCache();
 
 public PhoenixDriver() { // for Squirrel
 // Use production services implementation
 super();
 }
-
+
+private Cache 
initializeConnectionCache() {
+Configuration config = 
HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
+int maxCacheSize = 
config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_SIZE,
+QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE);
+int maxCacheDuration = 
config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS,
+QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION);
+RemovalListener 
cacheRemovalListener =
+new RemovalListener() {
+@Override
+public void onRemoval(RemovalNotification notification) {
+String connInfoIdentifier = 
notification.getKey().toString();
+logger.debug("Expiring " + connInfoIdentifier + " because 
of "
++ notification.getCause().name());
+
+try {
+notification.getValue().close();
+}
+catch (SQLException se) {
+logger.error("Error while closing expired cache 
connection " + connInfoIdentifier, se);
+}
+}
+};
+return CacheBuilder.newBuilder()
+.maximumSize(maxCacheSize)
+.expireAfterAccess(maxCacheDuration, TimeUnit.MILLISECONDS)
+.removalListener(cacheRemovalListener)
+   

[2/3] phoenix git commit: PHOENIX-3611 Cache for client connections will expire (and close) entries in LRU fashion.

2017-01-20 Thread apurtell
PHOENIX-3611 Cache for client connections will expire (and close) entries in 
LRU fashion.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d75458fe
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d75458fe
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d75458fe

Branch: refs/heads/4.x-HBase-1.1
Commit: d75458fee60fc16661b9394a9e93824d8c3c0363
Parents: 59e5115
Author: Geoffrey 
Authored: Thu Jan 19 16:08:20 2017 -0800
Committer: Andrew Purtell 
Committed: Fri Jan 20 16:17:19 2017 -0800

--
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  | 99 +---
 .../org/apache/phoenix/query/QueryServices.java |  5 +-
 .../phoenix/query/QueryServicesOptions.java |  2 +
 3 files changed, 70 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d75458fe/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index fa31dd9..b2acacf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -23,20 +23,13 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Properties;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
+import java.util.concurrent.*;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.annotation.concurrent.GuardedBy;
 
+import com.google.common.cache.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -50,7 +43,6 @@ import org.apache.phoenix.query.QueryServicesOptions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 
@@ -147,13 +139,43 @@ public final class PhoenixDriver extends 
PhoenixEmbeddedDriver {
 }
 
 // One entry per cluster here
-private final ConcurrentMap 
connectionQueryServicesMap = new 
ConcurrentHashMap(3);
+private final Cache 
connectionQueryServicesCache =
+initializeConnectionCache();
 
 public PhoenixDriver() { // for Squirrel
 // Use production services implementation
 super();
 }
-
+
+private Cache 
initializeConnectionCache() {
+Configuration config = 
HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
+int maxCacheSize = 
config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_SIZE,
+QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE);
+int maxCacheDuration = 
config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS,
+QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION);
+RemovalListener 
cacheRemovalListener =
+new RemovalListener() {
+@Override
+public void onRemoval(RemovalNotification notification) {
+String connInfoIdentifier = 
notification.getKey().toString();
+logger.debug("Expiring " + connInfoIdentifier + " because 
of "
++ notification.getCause().name());
+
+try {
+notification.getValue().close();
+}
+catch (SQLException se) {
+logger.error("Error while closing expired cache 
connection " + connInfoIdentifier, se);
+}
+}
+};
+return CacheBuilder.newBuilder()
+.maximumSize(maxCacheSize)
+.expireAfterAccess(maxCacheDuration, TimeUnit.MILLISECONDS)
+.removalListener(cacheRemovalListener)
+

[1/3] phoenix git commit: PHOENIX-3611 Cache for client connections will expire (and close) entries in LRU fashion.

2017-01-20 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 2fd9b0861 -> badb9b40b
  refs/heads/4.x-HBase-1.1 59e5115c1 -> d75458fee
  refs/heads/master 4e4f7ddba -> 9b7f3ca5b


PHOENIX-3611 Cache for client connections will expire (and close) entries in 
LRU fashion.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9b7f3ca5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9b7f3ca5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9b7f3ca5

Branch: refs/heads/master
Commit: 9b7f3ca5b2e6bbf0757d99fbe48b3065e79d6066
Parents: 4e4f7dd
Author: Geoffrey 
Authored: Thu Jan 19 16:08:20 2017 -0800
Committer: Andrew Purtell 
Committed: Fri Jan 20 16:16:50 2017 -0800

--
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  | 99 +---
 .../org/apache/phoenix/query/QueryServices.java |  5 +-
 .../phoenix/query/QueryServicesOptions.java |  2 +
 3 files changed, 70 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9b7f3ca5/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index fa31dd9..b2acacf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -23,20 +23,13 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Properties;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
+import java.util.concurrent.*;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.annotation.concurrent.GuardedBy;
 
+import com.google.common.cache.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -50,7 +43,6 @@ import org.apache.phoenix.query.QueryServicesOptions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 
@@ -147,13 +139,43 @@ public final class PhoenixDriver extends 
PhoenixEmbeddedDriver {
 }
 
 // One entry per cluster here
-private final ConcurrentMap 
connectionQueryServicesMap = new 
ConcurrentHashMap(3);
+private final Cache 
connectionQueryServicesCache =
+initializeConnectionCache();
 
 public PhoenixDriver() { // for Squirrel
 // Use production services implementation
 super();
 }
-
+
+private Cache 
initializeConnectionCache() {
+Configuration config = 
HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
+int maxCacheSize = 
config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_SIZE,
+QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE);
+int maxCacheDuration = 
config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS,
+QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION);
+RemovalListener 
cacheRemovalListener =
+new RemovalListener() {
+@Override
+public void onRemoval(RemovalNotification notification) {
+String connInfoIdentifier = 
notification.getKey().toString();
+logger.debug("Expiring " + connInfoIdentifier + " because 
of "
++ notification.getCause().name());
+
+try {
+notification.getValue().close();
+}
+catch (SQLException se) {
+logger.error("Error while closing expired cache 
connection " + connInfoIdentifier, se);
+}
+}
+};
+return CacheBuilder.newBuilder()

[4/6] phoenix git commit: PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra client embedded in the Phoenix connection

2017-01-08 Thread apurtell
PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra 
client embedded in the Phoenix connection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f34a605a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f34a605a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f34a605a

Branch: refs/heads/4.9-HBase-1.2
Commit: f34a605a3934cb41bec8e06244992a82fd895b71
Parents: fb504f2
Author: Andrew Purtell 
Authored: Wed Jan 4 16:48:44 2017 -0800
Committer: Andrew Purtell 
Committed: Sat Jan 7 18:53:33 2017 -0800

--
 .../query/ConnectionQueryServicesImpl.java  | 37 
 1 file changed, 23 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f34a605a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index a2b32a8..f82e77f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -254,6 +254,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 private final boolean returnSequenceValues ;
 
 private HConnection connection;
+private ZKClientService txZKClientService;
 private TransactionServiceClient txServiceClient;
 private volatile boolean initialized;
 private volatile int nSequenceSaltBuckets;
@@ -370,15 +371,16 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 
 int timeOut = props.getInt(HConstants.ZK_SESSION_TIMEOUT, 
HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
 // Create instance of the tephra zookeeper client
-ZKClientService tephraZKClientService = new 
TephraZKClientService(zkQuorumServersString, timeOut, null, 
ArrayListMultimap.create());
-
-ZKClientService zkClientService = ZKClientServices.delegate(
-ZKClients.reWatchOnExpire(
-ZKClients.retryOnFailure(tephraZKClientService, 
RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
-)
+txZKClientService = ZKClientServices.delegate(
+ZKClients.reWatchOnExpire(
+ZKClients.retryOnFailure(
+ new TephraZKClientService(zkQuorumServersString, timeOut, 
null,
+ ArrayListMultimap.create()), 
+ RetryStrategies.exponentialDelay(500, 2000, 
TimeUnit.MILLISECONDS))
+ )
 );
-zkClientService.startAndWait();
-ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(zkClientService);
+txZKClientService.startAndWait();
+ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(txZKClientService);
 PooledClientProvider pooledClientProvider = new PooledClientProvider(
 config, zkDiscoveryService);
 this.txServiceClient = new 
TransactionServiceClient(config,pooledClientProvider);
@@ -389,11 +391,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean transactionsEnabled = props.getBoolean(
 QueryServices.TRANSACTIONS_ENABLED,
 QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
-// only initialize the tx service client if needed
+this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
+// only initialize the tx service client if needed and if we 
succeeded in getting a connection
+// to HBase
 if (transactionsEnabled) {
 initTxServiceClient();
 }
-this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
 } catch (IOException e) {
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
 .setRootCause(e).build().buildException();
@@ -463,14 +466,20 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 } finally {
 try {
 childServices.clear();
-if (renewLeaseExecutor != null) {
-renewLeaseExecutor.shutdownNow();
-}
 synchronized (latestMetaDataLock) {
 latestMetaData 

[3/6] phoenix git commit: PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra client embedded in the Phoenix connection

2017-01-08 Thread apurtell
PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra 
client embedded in the Phoenix connection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b69b177b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b69b177b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b69b177b

Branch: refs/heads/4.x-HBase-0.98
Commit: b69b177b3f5e39d1fa1c3300acfed9290cbe5c52
Parents: 91d1478
Author: Andrew Purtell 
Authored: Wed Jan 4 16:48:44 2017 -0800
Committer: Andrew Purtell 
Committed: Sat Jan 7 18:52:59 2017 -0800

--
 .../query/ConnectionQueryServicesImpl.java  | 37 
 1 file changed, 23 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b69b177b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index f1de0bd..c1688c4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -255,6 +255,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 private final boolean returnSequenceValues ;
 
 private HConnection connection;
+private ZKClientService txZKClientService;
 private TransactionServiceClient txServiceClient;
 private volatile boolean initialized;
 private volatile int nSequenceSaltBuckets;
@@ -371,15 +372,16 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 
 int timeOut = props.getInt(HConstants.ZK_SESSION_TIMEOUT, 
HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
 // Create instance of the tephra zookeeper client
-ZKClientService tephraZKClientService = new 
TephraZKClientService(zkQuorumServersString, timeOut, null, 
ArrayListMultimap.create());
-
-ZKClientService zkClientService = ZKClientServices.delegate(
-ZKClients.reWatchOnExpire(
-ZKClients.retryOnFailure(tephraZKClientService, 
RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
-)
+txZKClientService = ZKClientServices.delegate(
+ZKClients.reWatchOnExpire(
+ZKClients.retryOnFailure(
+ new TephraZKClientService(zkQuorumServersString, timeOut, 
null,
+ ArrayListMultimap.create()), 
+ RetryStrategies.exponentialDelay(500, 2000, 
TimeUnit.MILLISECONDS))
+ )
 );
-zkClientService.startAndWait();
-ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(zkClientService);
+txZKClientService.startAndWait();
+ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(txZKClientService);
 PooledClientProvider pooledClientProvider = new PooledClientProvider(
 config, zkDiscoveryService);
 this.txServiceClient = new 
TransactionServiceClient(config,pooledClientProvider);
@@ -390,11 +392,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean transactionsEnabled = props.getBoolean(
 QueryServices.TRANSACTIONS_ENABLED,
 QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
-// only initialize the tx service client if needed
+this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
+// only initialize the tx service client if needed and if we 
succeeded in getting a connection
+// to HBase
 if (transactionsEnabled) {
 initTxServiceClient();
 }
-this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
 } catch (IOException e) {
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
 .setRootCause(e).build().buildException();
@@ -464,14 +467,20 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 } finally {
 try {
 childServices.clear();
-if (renewLeaseExecutor != null) {
-renewLeaseExecutor.shutdownNow();
-}
 synchronized (latestMetaDataLock) {
 

[1/6] phoenix git commit: PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra client embedded in the Phoenix connection

2017-01-08 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.9-HBase-0.98 4340dadb2 -> 3592782ae
  refs/heads/4.9-HBase-1.1 397fff999 -> 87cf11434
  refs/heads/4.9-HBase-1.2 fb504f2d7 -> f34a605a3
  refs/heads/4.x-HBase-0.98 91d1478cf -> b69b177b3
  refs/heads/4.x-HBase-1.1 e24ec6a8e -> cdc77dfcb
  refs/heads/master a164f0327 -> d8f459498


PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra 
client embedded in the Phoenix connection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d8f45949
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d8f45949
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d8f45949

Branch: refs/heads/master
Commit: d8f4594989c0b73945aaffec5649a0b62ac59724
Parents: a164f03
Author: Andrew Purtell 
Authored: Wed Jan 4 16:48:44 2017 -0800
Committer: Andrew Purtell 
Committed: Sat Jan 7 10:03:23 2017 -0800

--
 .../query/ConnectionQueryServicesImpl.java  | 37 
 1 file changed, 23 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d8f45949/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index f66b358..be34f66 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -254,6 +254,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 private final boolean returnSequenceValues ;
 
 private HConnection connection;
+private ZKClientService txZKClientService;
 private TransactionServiceClient txServiceClient;
 private volatile boolean initialized;
 private volatile int nSequenceSaltBuckets;
@@ -370,15 +371,16 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 
 int timeOut = props.getInt(HConstants.ZK_SESSION_TIMEOUT, 
HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
 // Create instance of the tephra zookeeper client
-ZKClientService tephraZKClientService = new 
TephraZKClientService(zkQuorumServersString, timeOut, null, 
ArrayListMultimap.create());
-
-ZKClientService zkClientService = ZKClientServices.delegate(
-ZKClients.reWatchOnExpire(
-ZKClients.retryOnFailure(tephraZKClientService, 
RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
-)
+txZKClientService = ZKClientServices.delegate(
+ZKClients.reWatchOnExpire(
+ZKClients.retryOnFailure(
+ new TephraZKClientService(zkQuorumServersString, timeOut, 
null,
+ ArrayListMultimap.create()), 
+ RetryStrategies.exponentialDelay(500, 2000, 
TimeUnit.MILLISECONDS))
+ )
 );
-zkClientService.startAndWait();
-ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(zkClientService);
+txZKClientService.startAndWait();
+ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(txZKClientService);
 PooledClientProvider pooledClientProvider = new PooledClientProvider(
 config, zkDiscoveryService);
 this.txServiceClient = new 
TransactionServiceClient(config,pooledClientProvider);
@@ -389,11 +391,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean transactionsEnabled = props.getBoolean(
 QueryServices.TRANSACTIONS_ENABLED,
 QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
-// only initialize the tx service client if needed
+this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
+// only initialize the tx service client if needed and if we 
succeeded in getting a connection
+// to HBase
 if (transactionsEnabled) {
 initTxServiceClient();
 }
-this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
 } catch (IOException e) {
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
 .setRootCause(e).build().buildException();
@@ -463,14 +466,20 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices 

[5/6] phoenix git commit: PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra client embedded in the Phoenix connection

2017-01-08 Thread apurtell
PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra 
client embedded in the Phoenix connection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/87cf1143
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/87cf1143
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/87cf1143

Branch: refs/heads/4.9-HBase-1.1
Commit: 87cf11434823c4fe325f340f6d19f7604bbf27a6
Parents: 397fff9
Author: Andrew Purtell 
Authored: Wed Jan 4 16:48:44 2017 -0800
Committer: Andrew Purtell 
Committed: Sat Jan 7 18:53:38 2017 -0800

--
 .../query/ConnectionQueryServicesImpl.java  | 37 
 1 file changed, 23 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/87cf1143/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index eb26230..30d3a1a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -254,6 +254,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 private final boolean returnSequenceValues ;
 
 private HConnection connection;
+private ZKClientService txZKClientService;
 private TransactionServiceClient txServiceClient;
 private volatile boolean initialized;
 private volatile int nSequenceSaltBuckets;
@@ -370,15 +371,16 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 
 int timeOut = props.getInt(HConstants.ZK_SESSION_TIMEOUT, 
HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
 // Create instance of the tephra zookeeper client
-ZKClientService tephraZKClientService = new 
TephraZKClientService(zkQuorumServersString, timeOut, null, 
ArrayListMultimap.create());
-
-ZKClientService zkClientService = ZKClientServices.delegate(
-ZKClients.reWatchOnExpire(
-ZKClients.retryOnFailure(tephraZKClientService, 
RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
-)
+txZKClientService = ZKClientServices.delegate(
+ZKClients.reWatchOnExpire(
+ZKClients.retryOnFailure(
+ new TephraZKClientService(zkQuorumServersString, timeOut, 
null,
+ ArrayListMultimap.create()), 
+ RetryStrategies.exponentialDelay(500, 2000, 
TimeUnit.MILLISECONDS))
+ )
 );
-zkClientService.startAndWait();
-ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(zkClientService);
+txZKClientService.startAndWait();
+ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(txZKClientService);
 PooledClientProvider pooledClientProvider = new PooledClientProvider(
 config, zkDiscoveryService);
 this.txServiceClient = new 
TransactionServiceClient(config,pooledClientProvider);
@@ -389,11 +391,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean transactionsEnabled = props.getBoolean(
 QueryServices.TRANSACTIONS_ENABLED,
 QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
-// only initialize the tx service client if needed
+this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
+// only initialize the tx service client if needed and if we 
succeeded in getting a connection
+// to HBase
 if (transactionsEnabled) {
 initTxServiceClient();
 }
-this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
 } catch (IOException e) {
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
 .setRootCause(e).build().buildException();
@@ -463,14 +466,20 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 } finally {
 try {
 childServices.clear();
-if (renewLeaseExecutor != null) {
-renewLeaseExecutor.shutdownNow();
-}
 synchronized (latestMetaDataLock) {
 latestMetaData 

[2/6] phoenix git commit: PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra client embedded in the Phoenix connection

2017-01-08 Thread apurtell
PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra 
client embedded in the Phoenix connection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cdc77dfc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cdc77dfc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cdc77dfc

Branch: refs/heads/4.x-HBase-1.1
Commit: cdc77dfcb1a63f5ccb64cf95cdcee914c8803933
Parents: e24ec6a
Author: Andrew Purtell 
Authored: Wed Jan 4 16:48:44 2017 -0800
Committer: Andrew Purtell 
Committed: Sat Jan 7 10:04:22 2017 -0800

--
 .../query/ConnectionQueryServicesImpl.java  | 37 
 1 file changed, 23 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cdc77dfc/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 16350e5..21acd5b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -254,6 +254,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 private final boolean returnSequenceValues ;
 
 private HConnection connection;
+private ZKClientService txZKClientService;
 private TransactionServiceClient txServiceClient;
 private volatile boolean initialized;
 private volatile int nSequenceSaltBuckets;
@@ -370,15 +371,16 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 
 int timeOut = props.getInt(HConstants.ZK_SESSION_TIMEOUT, 
HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
 // Create instance of the tephra zookeeper client
-ZKClientService tephraZKClientService = new 
TephraZKClientService(zkQuorumServersString, timeOut, null, 
ArrayListMultimap.create());
-
-ZKClientService zkClientService = ZKClientServices.delegate(
-ZKClients.reWatchOnExpire(
-ZKClients.retryOnFailure(tephraZKClientService, 
RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
-)
+txZKClientService = ZKClientServices.delegate(
+ZKClients.reWatchOnExpire(
+ZKClients.retryOnFailure(
+ new TephraZKClientService(zkQuorumServersString, timeOut, 
null,
+ ArrayListMultimap.create()), 
+ RetryStrategies.exponentialDelay(500, 2000, 
TimeUnit.MILLISECONDS))
+ )
 );
-zkClientService.startAndWait();
-ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(zkClientService);
+txZKClientService.startAndWait();
+ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(txZKClientService);
 PooledClientProvider pooledClientProvider = new PooledClientProvider(
 config, zkDiscoveryService);
 this.txServiceClient = new 
TransactionServiceClient(config,pooledClientProvider);
@@ -389,11 +391,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean transactionsEnabled = props.getBoolean(
 QueryServices.TRANSACTIONS_ENABLED,
 QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
-// only initialize the tx service client if needed
+this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
+// only initialize the tx service client if needed and if we 
succeeded in getting a connection
+// to HBase
 if (transactionsEnabled) {
 initTxServiceClient();
 }
-this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
 } catch (IOException e) {
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
 .setRootCause(e).build().buildException();
@@ -463,14 +466,20 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 } finally {
 try {
 childServices.clear();
-if (renewLeaseExecutor != null) {
-renewLeaseExecutor.shutdownNow();
-}
 synchronized (latestMetaDataLock) {
 latestMetaData 

[6/6] phoenix git commit: PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra client embedded in the Phoenix connection

2017-01-08 Thread apurtell
PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra 
client embedded in the Phoenix connection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3592782a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3592782a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3592782a

Branch: refs/heads/4.9-HBase-0.98
Commit: 3592782ae2c337b696a879bc2f6610d280729dce
Parents: 4340dad
Author: Andrew Purtell 
Authored: Wed Jan 4 16:48:44 2017 -0800
Committer: Andrew Purtell 
Committed: Sat Jan 7 18:54:14 2017 -0800

--
 .../query/ConnectionQueryServicesImpl.java  | 37 
 1 file changed, 23 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3592782a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index fc0925d..0a1333f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -255,6 +255,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 private final boolean returnSequenceValues ;
 
 private HConnection connection;
+private ZKClientService txZKClientService;
 private TransactionServiceClient txServiceClient;
 private volatile boolean initialized;
 private volatile int nSequenceSaltBuckets;
@@ -371,15 +372,16 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 
 int timeOut = props.getInt(HConstants.ZK_SESSION_TIMEOUT, 
HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
 // Create instance of the tephra zookeeper client
-ZKClientService tephraZKClientService = new 
TephraZKClientService(zkQuorumServersString, timeOut, null, 
ArrayListMultimap.create());
-
-ZKClientService zkClientService = ZKClientServices.delegate(
-ZKClients.reWatchOnExpire(
-ZKClients.retryOnFailure(tephraZKClientService, 
RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
-)
+txZKClientService = ZKClientServices.delegate(
+ZKClients.reWatchOnExpire(
+ZKClients.retryOnFailure(
+ new TephraZKClientService(zkQuorumServersString, timeOut, 
null,
+ ArrayListMultimap.create()), 
+ RetryStrategies.exponentialDelay(500, 2000, 
TimeUnit.MILLISECONDS))
+ )
 );
-zkClientService.startAndWait();
-ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(zkClientService);
+txZKClientService.startAndWait();
+ZKDiscoveryService zkDiscoveryService = new 
ZKDiscoveryService(txZKClientService);
 PooledClientProvider pooledClientProvider = new PooledClientProvider(
 config, zkDiscoveryService);
 this.txServiceClient = new 
TransactionServiceClient(config,pooledClientProvider);
@@ -390,11 +392,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean transactionsEnabled = props.getBoolean(
 QueryServices.TRANSACTIONS_ENABLED,
 QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
-// only initialize the tx service client if needed
+this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
+// only initialize the tx service client if needed and if we 
succeeded in getting a connection
+// to HBase
 if (transactionsEnabled) {
 initTxServiceClient();
 }
-this.connection = 
HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
 } catch (IOException e) {
 throw new 
SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
 .setRootCause(e).build().buildException();
@@ -464,14 +467,20 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 } finally {
 try {
 childServices.clear();
-if (renewLeaseExecutor != null) {
-renewLeaseExecutor.shutdownNow();
-}
 synchronized (latestMetaDataLock) {
 

[3/4] phoenix git commit: PHOENIX-3037 Setup proper security context in compaction/split coprocessor hooks

2016-06-30 Thread apurtell
PHOENIX-3037 Setup proper security context in compaction/split coprocessor hooks


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9cddc463
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9cddc463
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9cddc463

Branch: refs/heads/4.x-HBase-0.98
Commit: 9cddc4631b207a76c9c4c5385c189ae0c173fcb0
Parents: 34662a3
Author: Andrew Purtell 
Authored: Wed Jun 29 12:22:04 2016 -0700
Committer: Andrew Purtell 
Committed: Thu Jun 30 14:24:34 2016 -0700

--
 .../coprocessor/DelegateRegionObserver.java | 248 ++-
 .../UngroupedAggregateRegionObserver.java   |  50 ++--
 .../org/apache/phoenix/hbase/index/Indexer.java |  20 +-
 3 files changed, 231 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9cddc463/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
index 8a5f9b4..6522b30 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.coprocessor;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.List;
 import java.util.NavigableSet;
 
@@ -58,6 +59,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.common.collect.ImmutableList;
@@ -123,115 +125,235 @@ public class DelegateRegionObserver implements 
RegionObserver {
 delegate.postFlush(c, store, resultFile);
 }
 
-@Override
-public void 
preCompactSelection(ObserverContext c, Store 
store,
-List candidates, CompactionRequest request) throws 
IOException {
-delegate.preCompactSelection(c, store, candidates, request);
-}
-
-@Override
-public void 
preCompactSelection(ObserverContext c, Store 
store,
-List candidates) throws IOException {
-delegate.preCompactSelection(c, store, candidates);
-}
-
-@Override
-public void 
postCompactSelection(ObserverContext c, Store 
store,
-ImmutableList selected, CompactionRequest request) {
-delegate.postCompactSelection(c, store, selected, request);
-}
-
-@Override
-public void 
postCompactSelection(ObserverContext c, Store 
store,
-ImmutableList selected) {
-delegate.postCompactSelection(c, store, selected);
-}
-
-@Override
-public InternalScanner 
preCompact(ObserverContext c, Store store,
-InternalScanner scanner, ScanType scanType, CompactionRequest 
request)
+// Compaction and split upcalls run with the effective user context of the 
requesting user.
+// This will lead to failure of cross cluster RPC if the effective user is 
not
+// the login user. Switch to the login user context to ensure we have the 
expected
+// security context.
+
+@Override
+public void preCompactSelection(final 
ObserverContext c, final Store store,
+final List candidates, final CompactionRequest request) 
throws IOException {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+delegate.preCompactSelection(c, store, candidates, request);
+return null;
+}
+});
+}
+
+@Override
+public void preCompactSelection(final 
ObserverContext c, final Store store,
+final List candidates) throws IOException {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+delegate.preCompactSelection(c, store, candidates);
+return null;
+}
+});
+}
+
+@Override
+public void postCompactSelection(final 
ObserverContext c, final Store store,
+final ImmutableList selected, final CompactionRequest 
request) {
+try {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+  

[1/4] phoenix git commit: PHOENIX-3037 Setup proper security context in compaction/split coprocessor hooks

2016-06-30 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 34662a314 -> 9cddc4631
  refs/heads/4.x-HBase-1.0 736cf83a4 -> 197f85f2b
  refs/heads/4.x-HBase-1.1 1d29ec0b6 -> 96234fa32
  refs/heads/master d1cde87fa -> cec2340d0


PHOENIX-3037 Setup proper security context in compaction/split coprocessor hooks


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cec2340d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cec2340d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cec2340d

Branch: refs/heads/master
Commit: cec2340d00cdc7bcbc1945b3952f8e6948febc61
Parents: d1cde87
Author: Andrew Purtell 
Authored: Wed Jun 29 12:22:04 2016 -0700
Committer: Andrew Purtell 
Committed: Thu Jun 30 14:24:11 2016 -0700

--
 .../coprocessor/DelegateRegionObserver.java | 248 ++-
 .../UngroupedAggregateRegionObserver.java   |  47 ++--
 .../org/apache/phoenix/hbase/index/Indexer.java |  20 +-
 3 files changed, 230 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cec2340d/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
index 1b321b8..59b2271 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.coprocessor;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.List;
 import java.util.NavigableSet;
 
@@ -57,6 +58,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WALKey;
 
@@ -123,115 +125,235 @@ public class DelegateRegionObserver implements 
RegionObserver {
 delegate.postFlush(c, store, resultFile);
 }
 
-@Override
-public void 
preCompactSelection(ObserverContext c, Store 
store,
-List candidates, CompactionRequest request) throws 
IOException {
-delegate.preCompactSelection(c, store, candidates, request);
-}
-
-@Override
-public void 
preCompactSelection(ObserverContext c, Store 
store,
-List candidates) throws IOException {
-delegate.preCompactSelection(c, store, candidates);
-}
-
-@Override
-public void 
postCompactSelection(ObserverContext c, Store 
store,
-ImmutableList selected, CompactionRequest request) {
-delegate.postCompactSelection(c, store, selected, request);
-}
-
-@Override
-public void 
postCompactSelection(ObserverContext c, Store 
store,
-ImmutableList selected) {
-delegate.postCompactSelection(c, store, selected);
-}
-
-@Override
-public InternalScanner 
preCompact(ObserverContext c, Store store,
-InternalScanner scanner, ScanType scanType, CompactionRequest 
request)
+// Compaction and split upcalls run with the effective user context of the 
requesting user.
+// This will lead to failure of cross cluster RPC if the effective user is 
not
+// the login user. Switch to the login user context to ensure we have the 
expected
+// security context.
+
+@Override
+public void preCompactSelection(final 
ObserverContext c, final Store store,
+final List candidates, final CompactionRequest request) 
throws IOException {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+delegate.preCompactSelection(c, store, candidates, request);
+return null;
+}
+});
+}
+
+@Override
+public void preCompactSelection(final 
ObserverContext c, final Store store,
+final List candidates) throws IOException {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+delegate.preCompactSelection(c, store, candidates);
+return null;
+}
+});
+}
+
+@Override
+public void postCompactSelection(final 
ObserverContext c, final Store store,
+final 

[4/4] phoenix git commit: PHOENIX-3037 Setup proper security context in compaction/split coprocessor hooks

2016-06-30 Thread apurtell
PHOENIX-3037 Setup proper security context in compaction/split coprocessor hooks


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/197f85f2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/197f85f2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/197f85f2

Branch: refs/heads/4.x-HBase-1.0
Commit: 197f85f2b357d18a7cfb72d80dc54c05cc177a92
Parents: 736cf83
Author: Andrew Purtell 
Authored: Wed Jun 29 12:22:04 2016 -0700
Committer: Andrew Purtell 
Committed: Thu Jun 30 14:38:00 2016 -0700

--
 .../coprocessor/DelegateRegionObserver.java | 248 ++-
 .../UngroupedAggregateRegionObserver.java   |  51 ++--
 .../org/apache/phoenix/hbase/index/Indexer.java |  20 +-
 3 files changed, 231 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/197f85f2/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
index 82284ec..a8f2d88 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.coprocessor;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.List;
 import java.util.NavigableSet;
 
@@ -57,6 +58,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WALKey;
 
@@ -113,115 +115,235 @@ public class DelegateRegionObserver implements 
RegionObserver {
 delegate.postFlush(c, store, resultFile);
 }
 
-@Override
-public void 
preCompactSelection(ObserverContext c, Store 
store,
-List candidates, CompactionRequest request) throws 
IOException {
-delegate.preCompactSelection(c, store, candidates, request);
-}
-
-@Override
-public void 
preCompactSelection(ObserverContext c, Store 
store,
-List candidates) throws IOException {
-delegate.preCompactSelection(c, store, candidates);
-}
-
-@Override
-public void 
postCompactSelection(ObserverContext c, Store 
store,
-ImmutableList selected, CompactionRequest request) {
-delegate.postCompactSelection(c, store, selected, request);
-}
-
-@Override
-public void 
postCompactSelection(ObserverContext c, Store 
store,
-ImmutableList selected) {
-delegate.postCompactSelection(c, store, selected);
-}
-
-@Override
-public InternalScanner 
preCompact(ObserverContext c, Store store,
-InternalScanner scanner, ScanType scanType, CompactionRequest 
request)
+// Compaction and split upcalls run with the effective user context of the 
requesting user.
+// This will lead to failure of cross cluster RPC if the effective user is 
not
+// the login user. Switch to the login user context to ensure we have the 
expected
+// security context.
+
+@Override
+public void preCompactSelection(final 
ObserverContext c, final Store store,
+final List candidates, final CompactionRequest request) 
throws IOException {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+delegate.preCompactSelection(c, store, candidates, request);
+return null;
+}
+});
+}
+
+@Override
+public void preCompactSelection(final 
ObserverContext c, final Store store,
+final List candidates) throws IOException {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+delegate.preCompactSelection(c, store, candidates);
+return null;
+}
+});
+}
+
+@Override
+public void postCompactSelection(final 
ObserverContext c, final Store store,
+final ImmutableList selected, final CompactionRequest 
request) {
+try {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+

[2/4] phoenix git commit: PHOENIX-3037 Setup proper security context in compaction/split coprocessor hooks

2016-06-30 Thread apurtell
PHOENIX-3037 Setup proper security context in compaction/split coprocessor hooks


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/96234fa3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/96234fa3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/96234fa3

Branch: refs/heads/4.x-HBase-1.1
Commit: 96234fa32124a5dc731709e130b81042463ef11d
Parents: 1d29ec0
Author: Andrew Purtell 
Authored: Wed Jun 29 12:22:04 2016 -0700
Committer: Andrew Purtell 
Committed: Thu Jun 30 14:24:19 2016 -0700

--
 .../coprocessor/DelegateRegionObserver.java | 248 ++-
 .../UngroupedAggregateRegionObserver.java   |  47 ++--
 .../org/apache/phoenix/hbase/index/Indexer.java |  20 +-
 3 files changed, 230 insertions(+), 85 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/96234fa3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
index 1b321b8..59b2271 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.coprocessor;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.List;
 import java.util.NavigableSet;
 
@@ -57,6 +58,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WALKey;
 
@@ -123,115 +125,235 @@ public class DelegateRegionObserver implements 
RegionObserver {
 delegate.postFlush(c, store, resultFile);
 }
 
-@Override
-public void 
preCompactSelection(ObserverContext c, Store 
store,
-List candidates, CompactionRequest request) throws 
IOException {
-delegate.preCompactSelection(c, store, candidates, request);
-}
-
-@Override
-public void 
preCompactSelection(ObserverContext c, Store 
store,
-List candidates) throws IOException {
-delegate.preCompactSelection(c, store, candidates);
-}
-
-@Override
-public void 
postCompactSelection(ObserverContext c, Store 
store,
-ImmutableList selected, CompactionRequest request) {
-delegate.postCompactSelection(c, store, selected, request);
-}
-
-@Override
-public void 
postCompactSelection(ObserverContext c, Store 
store,
-ImmutableList selected) {
-delegate.postCompactSelection(c, store, selected);
-}
-
-@Override
-public InternalScanner 
preCompact(ObserverContext c, Store store,
-InternalScanner scanner, ScanType scanType, CompactionRequest 
request)
+// Compaction and split upcalls run with the effective user context of the 
requesting user.
+// This will lead to failure of cross cluster RPC if the effective user is 
not
+// the login user. Switch to the login user context to ensure we have the 
expected
+// security context.
+
+@Override
+public void preCompactSelection(final 
ObserverContext c, final Store store,
+final List candidates, final CompactionRequest request) 
throws IOException {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+delegate.preCompactSelection(c, store, candidates, request);
+return null;
+}
+});
+}
+
+@Override
+public void preCompactSelection(final 
ObserverContext c, final Store store,
+final List candidates) throws IOException {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+delegate.preCompactSelection(c, store, candidates);
+return null;
+}
+});
+}
+
+@Override
+public void postCompactSelection(final 
ObserverContext c, final Store store,
+final ImmutableList selected, final CompactionRequest 
request) {
+try {
+User.runAsLoginUser(new PrivilegedExceptionAction() {
+@Override
+public Void run() throws Exception {
+

[35/37] phoenix git commit: PHOENIX-2095 Lower the default for phoenix.sequence.saltBuckets

2015-07-08 Thread apurtell
PHOENIX-2095 Lower the default for phoenix.sequence.saltBuckets


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/efb941ae
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/efb941ae
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/efb941ae

Branch: refs/heads/master
Commit: efb941aef6062fe704460fd37f9cc062c2ed2eee
Parents: 973bccb
Author: Andrew Purtell apurt...@apache.org
Authored: Wed Jul 8 09:34:20 2015 -0700
Committer: Andrew Purtell apurt...@apache.org
Committed: Wed Jul 8 09:34:20 2015 -0700

--
 .../main/java/org/apache/phoenix/query/QueryServicesOptions.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/efb941ae/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 3efd79f..ea81cf5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -174,7 +174,7 @@ public class QueryServicesOptions {
 /**
  * Use only first time SYSTEM.SEQUENCE table is created.
  */
-public static final int DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS = 
SaltingUtil.MAX_BUCKET_NUM;
+public static final int DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS = 0;
 /**
  * Default value for coprocessor priority is between SYSTEM and USER 
priority.
  */



[07/37] phoenix git commit: PHOENIX-2005 Connection utilities omit zk client port, parent znode

2015-07-08 Thread apurtell
PHOENIX-2005 Connection utilities omit zk client port, parent znode


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c6b37b97
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c6b37b97
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c6b37b97

Branch: refs/heads/4.x-HBase-1.1
Commit: c6b37b979da1b514bcb9257c7e095e39b0c2c215
Parents: 3cdc323
Author: Nick Dimiduk ndimi...@apache.org
Authored: Tue May 26 11:11:48 2015 -0700
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Tue May 26 13:27:03 2015 -0700

--
 .../phoenix/jdbc/PhoenixEmbeddedDriver.java | 28 --
 .../phoenix/mapreduce/CsvBulkLoadTool.java  | 93 ++--
 .../phoenix/mapreduce/CsvToKeyValueMapper.java  | 26 +-
 .../query/ConnectionQueryServicesImpl.java  |  4 +-
 .../java/org/apache/phoenix/util/QueryUtil.java | 45 --
 .../phoenix/jdbc/PhoenixEmbeddedDriverTest.java | 14 ++-
 .../phoenix/mapreduce/CsvBulkLoadToolTest.java  | 11 ---
 .../mapreduce/CsvToKeyValueMapperTest.java  | 15 
 .../org/apache/phoenix/util/QueryUtilTest.java  | 33 ---
 9 files changed, 139 insertions(+), 130 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c6b37b97/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 9e95667..2451603 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -31,6 +31,7 @@ import java.util.logging.Logger;
 
 import javax.annotation.concurrent.Immutable;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -174,10 +175,10 @@ public abstract class PhoenixEmbeddedDriver implements 
Driver, org.apache.phoeni
 }
 
 /**
- * 
+ *
  * Class to encapsulate connection info for HBase
  *
- * 
+ *
  * @since 0.1.1
  */
 public static class ConnectionInfo {
@@ -204,12 +205,18 @@ public abstract class PhoenixEmbeddedDriver implements 
Driver, org.apache.phoeni
 return false;
 }
 
-protected static ConnectionInfo create(String url) throws SQLException 
{
-StringTokenizer tokenizer = new StringTokenizer(url == null ? "" : 
url.substring(PhoenixRuntime.JDBC_PROTOCOL.length()),DELIMITERS, true);
+public static ConnectionInfo create(String url) throws SQLException {
+url = url == null ? "" : url;
+url = url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)
+? url.substring(PhoenixRuntime.JDBC_PROTOCOL.length())
+: url;
+StringTokenizer tokenizer = new StringTokenizer(url, DELIMITERS, 
true);
 int nTokens = 0;
 String[] tokens = new String[5];
 String token = null;
-while (tokenizer.hasMoreTokens() && 
!(token=tokenizer.nextToken()).equals(TERMINATOR) && tokenizer.hasMoreTokens() 
&& nTokens < tokens.length) {
+while (tokenizer.hasMoreTokens() &&
+!(token=tokenizer.nextToken()).equals(TERMINATOR) &&
+tokenizer.hasMoreTokens() && nTokens < tokens.length) {
 token = tokenizer.nextToken();
 // This would mean we have an empty string for a token which 
is illegal
 if (DELIMITERS.contains(token)) {
@@ -316,8 +323,7 @@ public abstract class PhoenixEmbeddedDriver implements 
Driver, org.apache.phoeni
 private final String principal;
 private final String keytab;
 
-// used for testing
-ConnectionInfo(String zookeeperQuorum, Integer port, String rootNode, 
String principal, String keytab) {
+public ConnectionInfo(String zookeeperQuorum, Integer port, String 
rootNode, String principal, String keytab) {
 this.zookeeperQuorum = zookeeperQuorum;
 this.port = port;
 this.rootNode = rootNode;
@@ -326,8 +332,7 @@ public abstract class PhoenixEmbeddedDriver implements 
Driver, org.apache.phoeni
 this.keytab = keytab;
 }
 
-// used for testing
-ConnectionInfo(String zookeeperQuorum, Integer port, String rootNode) {
+public ConnectionInfo(String zookeeperQuorum, Integer port, String 
rootNode) {
this(zookeeperQuorum, port, rootNode, null, null);
 }
 
@@ -417,6 +422,11 @@ 

[13/37] phoenix git commit: PHOENIX-2010 Properly validate number of arguments passed to the functions in FunctionParseNode#validate(Rajeshbabu)

2015-07-08 Thread apurtell
PHOENIX-2010 Properly validate number of arguments passed to the functions in 
FunctionParseNode#validate(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b2c0cb90
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b2c0cb90
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b2c0cb90

Branch: refs/heads/4.x-HBase-1.1
Commit: b2c0cb9002ee881f21d968817c386a98d39074ca
Parents: a600cc4
Author: Rajeshbabu Chintaguntla rajeshb...@apache.org
Authored: Sun May 31 07:40:39 2015 +0530
Committer: Rajeshbabu Chintaguntla rajeshb...@apache.org
Committed: Sun May 31 07:40:39 2015 +0530

--
 .../phoenix/end2end/UserDefinedFunctionsIT.java   | 14 ++
 .../org/apache/phoenix/parse/FunctionParseNode.java   |  4 
 .../main/java/org/apache/phoenix/parse/PFunction.java |  4 +---
 3 files changed, 19 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2c0cb90/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index 7dbde3c..868e19d 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -442,6 +442,20 @@ public class UserDefinedFunctionsIT extends 
BaseOwnClusterIT{
 rs = stmt.executeQuery("select k from t9 where mysum9(k)=11");
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
+try {
+rs = stmt.executeQuery("select k from t9 where mysum9(k,10,'x')=11");
+fail("FunctionNotFoundException should be thrown");
+} catch(FunctionNotFoundException e) {
+} catch(Exception e) {
+fail("FunctionNotFoundException should be thrown");
+}
+try {
+rs = stmt.executeQuery("select mysum9() from t9");
+fail("FunctionNotFoundException should be thrown");
+} catch(FunctionNotFoundException e) {
+} catch(Exception e) {
+fail("FunctionNotFoundException should be thrown");
+}
 stmt.execute(drop function mysum9);
 try {
 rs = stmt.executeQuery(select k from t9 where mysum9(k)=11);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2c0cb90/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
index d1001ee..be52d89 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/FunctionParseNode.java
@@ -41,6 +41,7 @@ import 
org.apache.phoenix.expression.function.FunctionExpression;
 import org.apache.phoenix.expression.function.UDFExpression;
 import org.apache.phoenix.parse.PFunction.FunctionArgument;
 import org.apache.phoenix.schema.ArgumentTypeMismatchException;
+import org.apache.phoenix.schema.FunctionNotFoundException;
 import org.apache.phoenix.schema.ValueRangeExcpetion;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDataTypeFactory;
@@ -133,6 +134,9 @@ public class FunctionParseNode extends CompoundParseNode {
 public ListExpression validate(ListExpression children, 
StatementContext context) throws SQLException {
 BuiltInFunctionInfo info = this.getInfo();
 BuiltInFunctionArgInfo[] args = info.getArgs();
+if (args.length < children.size() || info.getRequiredArgCount() > 
children.size()) {
+throw new FunctionNotFoundException(this.name);
+}
 if (args.length > children.size()) {
 ListExpression moreChildren = new 
ArrayListExpression(children);
 for (int i = children.size(); i  info.getArgs().length; i++) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2c0cb90/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
index 351bec7..aeed3ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/PFunction.java
@@ -96,9 +96,7 @@ public class PFunction implements PMetaDataEntity {
 }
 
 public 

[30/37] phoenix git commit: PHOENIX-1935 org.apache.phoenix.end2end.ArithmeticQueryIT tests are failing (Alicia Ying Shu)

2015-07-08 Thread apurtell
PHOENIX-1935 org.apache.phoenix.end2end.ArithmeticQueryIT tests are failing 
(Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/05b1b8b1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/05b1b8b1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/05b1b8b1

Branch: refs/heads/4.x-HBase-1.1
Commit: 05b1b8b13f4137602567f67642946c883646d4d8
Parents: 329d749
Author: Nick Dimiduk ndimi...@apache.org
Authored: Wed Jun 17 12:28:35 2015 -0700
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Wed Jun 17 12:31:28 2015 -0700

--
 .../src/it/java/org/apache/phoenix/end2end/BaseViewIT.java  | 2 ++
 phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java | 3 +++
 .../src/test/java/org/apache/phoenix/query/BaseTest.java| 5 -
 3 files changed, 9 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/05b1b8b1/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
index b9d7180..3140077 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
@@ -98,6 +98,7 @@ public abstract class BaseViewIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 assertEquals(1, rs.getInt(1));
 assertEquals(121, rs.getInt(2));
 assertFalse(rs.next());
+conn.close();
 }
 
 protected void testUpdatableViewIndex(Integer saltBuckets) throws 
Exception {
@@ -179,6 +180,7 @@ public abstract class BaseViewIT extends 
BaseOwnClusterHBaseManagedTimeIT {
 + CLIENT MERGE SORT,
 QueryUtil.getExplainPlan(rs));
 }
+conn.close();
 }
 
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/05b1b8b1/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 266438d..fb58a8f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -92,8 +92,11 @@ public class ViewIT extends BaseViewIT {
 fail();
 } catch (ReadOnlyTableException e) {
 
+} finally {
+conn.close();
 }
 
+conn = DriverManager.getConnection(getUrl());
 int count = 0;
 ResultSet rs = conn.createStatement().executeQuery(SELECT k FROM v2);
 while (rs.next()) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/05b1b8b1/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
--
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index fa78656..3f09518 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -115,6 +115,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -1634,7 +1635,9 @@ public abstract class BaseTest {
 for (HTableDescriptor table : tables) {
 String schemaName = 
SchemaUtil.getSchemaNameFromFullName(table.getName());
 if (!QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) {
-admin.disableTable(table.getName());
+try{
+admin.disableTable(table.getName());
+} catch (TableNotEnabledException ignored){}
 admin.deleteTable(table.getName());
 }
 }



[26/37] phoenix git commit: PHOENIX-2032 psql.py is broken after PHOENIX-2013

2015-07-08 Thread apurtell
PHOENIX-2032 psql.py is broken after PHOENIX-2013


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d0bcb7b2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d0bcb7b2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d0bcb7b2

Branch: refs/heads/4.x-HBase-1.1
Commit: d0bcb7b2304133031b945d50e01f0f1d5fd023d4
Parents: e64f61b
Author: Nick Dimiduk ndimi...@apache.org
Authored: Fri Jun 12 10:23:05 2015 -0700
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Fri Jun 12 17:01:27 2015 -0700

--
 phoenix-assembly/pom.xml  |  4 
 phoenix-assembly/src/build/client.xml | 27 +++
 2 files changed, 23 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d0bcb7b2/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index d275d03..ebc5d71 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -152,10 +152,6 @@
 /dependency
 dependency
   groupIdorg.apache.phoenix/groupId
-  artifactIdphoenix-spark/artifactId
-/dependency
-dependency
-  groupIdorg.apache.phoenix/groupId
   artifactIdphoenix-server/artifactId
 /dependency
 dependency

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d0bcb7b2/phoenix-assembly/src/build/client.xml
--
diff --git a/phoenix-assembly/src/build/client.xml 
b/phoenix-assembly/src/build/client.xml
index 101ccd6..e99bb19 100644
--- a/phoenix-assembly/src/build/client.xml
+++ b/phoenix-assembly/src/build/client.xml
@@ -53,13 +53,32 @@
 /dependencySet
 
 !-- Make sure we get all the components, not just the minimal client ones 
(e.g.
-  phoenix-flume, phoenix-pig, etc) --
+  phoenix-flume, phoenix-pig, etc). We should exclude phoenix-server and
+  phoenix-server-client in the future, see PHOENIX-2032, PHOENIX-2038 --
 dependencySet
   outputDirectory//outputDirectory
   unpacktrue/unpack
-  includes
-includeorg.apache.phoenix:phoenix-*/include
-  /includes
+  !-- multiple deps provide some variant of LICENSE files/directories. 
These
+   overwrite each other at best, at worst conflict on case-insensitive
+   filesystems like HDFS+ and FAT32. Just exclude them --
+  unpackOptions
+excludes
+  exclude*license*/exclude
+  exclude*LICENSE*/exclude
+  exclude**/license/**/exclude
+  exclude**/LICENSE/**/exclude
+/excludes
+  /unpackOptions
+  !-- this is default, but make intentions clear --
+  useTransitiveDependenciestrue/useTransitiveDependencies
+  !-- When include subelements are present, they define a set of
+   artifact coordinates to include. If none is present, then includes
+   represents all valid values
+   
https://maven.apache.org/plugins/maven-assembly-plugin/assembly.html#class_dependencySet
+   This means bring in all dependencies transitively of the
+   phoenix-assembly module.
+  --
+  includes /
 /dependencySet
   /dependencySets
 /assembly



[06/37] phoenix git commit: Changing version to 4.5.0-HBase-1.1-SNAPSHOT

2015-07-08 Thread apurtell
Changing version to 4.5.0-HBase-1.1-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3cdc3230
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3cdc3230
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3cdc3230

Branch: refs/heads/4.x-HBase-1.1
Commit: 3cdc3230c570ee8c22bb6c1bab975699fd02e94c
Parents: 56e1c0a
Author: Rajeshbabu Chintaguntla rajeshb...@apache.org
Authored: Mon May 25 17:46:18 2015 +0530
Committer: Rajeshbabu Chintaguntla rajeshb...@apache.org
Committed: Mon May 25 17:46:18 2015 +0530

--
 phoenix-assembly/pom.xml  | 2 +-
 phoenix-core/pom.xml  | 2 +-
 phoenix-flume/pom.xml | 2 +-
 phoenix-pherf/pom.xml | 2 +-
 phoenix-pig/pom.xml   | 2 +-
 phoenix-server-client/pom.xml | 2 +-
 phoenix-server/pom.xml| 2 +-
 phoenix-spark/pom.xml | 2 +-
 pom.xml   | 2 +-
 9 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cdc3230/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 8d9a965..04d9335 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -26,7 +26,7 @@
   parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-SNAPSHOT/version
+version4.5.0-HBase-1.1-SNAPSHOT/version
   /parent
   artifactIdphoenix-assembly/artifactId
   namePhoenix Assembly/name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cdc3230/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 22e6b60..951e969 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-SNAPSHOT/version
+version4.5.0-HBase-1.1-SNAPSHOT/version
   /parent
   artifactIdphoenix-core/artifactId
   namePhoenix Core/name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cdc3230/phoenix-flume/pom.xml
--
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index b2b9a47..ea87ab0 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-SNAPSHOT/version
+version4.5.0-HBase-1.1-SNAPSHOT/version
   /parent
   artifactIdphoenix-flume/artifactId
   namePhoenix - Flume/name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cdc3230/phoenix-pherf/pom.xml
--
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 0901f71..e751d73 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -22,7 +22,7 @@
 parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-SNAPSHOT/version
+version4.5.0-HBase-1.1-SNAPSHOT/version
 /parent
 
 artifactIdphoenix-pherf/artifactId

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cdc3230/phoenix-pig/pom.xml
--
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 015a660..957c06f 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-SNAPSHOT/version
+version4.5.0-HBase-1.1-SNAPSHOT/version
   /parent
   artifactIdphoenix-pig/artifactId
   namePhoenix - Pig/name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cdc3230/phoenix-server-client/pom.xml
--
diff --git a/phoenix-server-client/pom.xml b/phoenix-server-client/pom.xml
index 4d6fd45..748e57c 100644
--- a/phoenix-server-client/pom.xml
+++ b/phoenix-server-client/pom.xml
@@ -4,7 +4,7 @@
   parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-SNAPSHOT/version
+version4.5.0-HBase-1.1-SNAPSHOT/version
   /parent
   artifactIdphoenix-server-client/artifactId
   namePhoenix Query Server Client/name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cdc3230/phoenix-server/pom.xml
--
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 9f6289f..ab9a472 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -4,7 +4,7 @@
   parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-SNAPSHOT/version
+

[16/37] phoenix git commit: PHOENIX-2016 Some Phoenix tests failed with NPE(Alicia Ying Shu)

2015-07-08 Thread apurtell
PHOENIX-2016 Some Phoenix tests failed with NPE(Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/dc46b144
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/dc46b144
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/dc46b144

Branch: refs/heads/4.x-HBase-1.1
Commit: dc46b144aa9eaf315c3969669dab7f0a50d94281
Parents: eb9452d
Author: Rajeshbabu Chintaguntla rajeshb...@apache.org
Authored: Mon Jun 1 21:34:16 2015 +0530
Committer: Rajeshbabu Chintaguntla rajeshb...@apache.org
Committed: Mon Jun 1 21:34:16 2015 +0530

--
 phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/dc46b144/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
--
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index b0574c3..fa78656 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -1627,6 +1627,7 @@ public abstract class BaseTest {
  * Disable and drop all the tables except SYSTEM.CATALOG and 
SYSTEM.SEQUENCE
  */
 private static void disableAndDropNonSystemTables() throws Exception {
+if (driver == null) return;
 HBaseAdmin admin = driver.getConnectionQueryServices(null, 
null).getAdmin();
 try {
 HTableDescriptor[] tables = admin.listTables();



[14/37] phoenix git commit: PHOENIX-2022 Make BaseRegionScanner.next abstract

2015-07-08 Thread apurtell
PHOENIX-2022 Make BaseRegionScanner.next abstract

Avoid infinite recursion by removing a recursive call within
BaseRegionScanner.next, which was already being used as an
abstract method.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c1882ee2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c1882ee2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c1882ee2

Branch: refs/heads/4.x-HBase-1.1
Commit: c1882ee279293b560fda9beb10ac50b8d3ead589
Parents: b2c0cb9
Author: Gabriel Reid gabri...@ngdata.com
Authored: Mon Jun 1 08:57:22 2015 +0200
Committer: Gabriel Reid gabri...@ngdata.com
Committed: Mon Jun 1 17:22:49 2015 +0200

--
 .../java/org/apache/phoenix/coprocessor/BaseRegionScanner.java   | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c1882ee2/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
index 828f776..3f73048 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseRegionScanner.java
@@ -33,9 +33,7 @@ public abstract class BaseRegionScanner implements 
RegionScanner {
 }
 
 @Override
-public boolean next(ListCell results) throws IOException {
-return next(results);
-}
+public abstract boolean next(ListCell results) throws IOException;
 
 @Override
 public boolean next(ListCell result, ScannerContext scannerContext) 
throws IOException {



[18/37] phoenix git commit: PHOENIX-1962 Apply check style to the build

2015-07-08 Thread apurtell
PHOENIX-1962 Apply check style to the build


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/29ea5035
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/29ea5035
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/29ea5035

Branch: refs/heads/4.x-HBase-1.1
Commit: 29ea503546265a619ce501c477a109b69f940a00
Parents: f2be913
Author: Nick Dimiduk ndimi...@apache.org
Authored: Sat May 9 11:10:54 2015 -0700
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Mon Jun 1 12:21:48 2015 -0700

--
 phoenix-assembly/pom.xml|   4 +
 phoenix-core/pom.xml|   4 +
 phoenix-flume/pom.xml   |   4 +
 phoenix-pherf/pom.xml   |   1 +
 phoenix-pig/pom.xml |   4 +
 phoenix-server-client/pom.xml   |   4 +
 phoenix-server/pom.xml  |   4 +
 phoenix-spark/pom.xml   |   1 +
 pom.xml |  23 ++
 src/main/config/checkstyle/checker.xml  | 281 +++
 src/main/config/checkstyle/header.txt   |  16 ++
 src/main/config/checkstyle/suppressions.xml |  46 
 12 files changed, 392 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/29ea5035/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 04d9335..d275d03 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -33,6 +33,10 @@
   descriptionAssemble Phoenix artifacts/description
   packagingpom/packaging
 
+  properties
+top.dir${project.basedir}/../top.dir
+  /properties
+
   build
 plugins
   plugin

http://git-wip-us.apache.org/repos/asf/phoenix/blob/29ea5035/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 951e969..6302441 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -24,6 +24,10 @@
   urlhttp://www.apache.org/url
   /organization
 
+  properties
+top.dir${project.basedir}/../top.dir
+  /properties
+
   build
 resources
   resource

http://git-wip-us.apache.org/repos/asf/phoenix/blob/29ea5035/phoenix-flume/pom.xml
--
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index ea87ab0..c7f0650 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -31,6 +31,10 @@
   artifactIdphoenix-flume/artifactId
   namePhoenix - Flume/name
 
+  properties
+top.dir${project.basedir}/../top.dir
+  /properties
+
   dependencies
dependency
   groupIdorg.apache.phoenix/groupId

http://git-wip-us.apache.org/repos/asf/phoenix/blob/29ea5035/phoenix-pherf/pom.xml
--
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index e751d73..dd45075 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -30,6 +30,7 @@
 namePhoenix - Pherf/name
 
 properties
+  top.dir${project.basedir}/../top.dir
 /properties
 
 profiles

http://git-wip-us.apache.org/repos/asf/phoenix/blob/29ea5035/phoenix-pig/pom.xml
--
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 957c06f..55b34d3 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -31,6 +31,10 @@
   artifactIdphoenix-pig/artifactId
   namePhoenix - Pig/name
 
+  properties
+top.dir${project.basedir}/../top.dir
+  /properties
+
   dependencies
 dependency
   groupIdorg.apache.phoenix/groupId

http://git-wip-us.apache.org/repos/asf/phoenix/blob/29ea5035/phoenix-server-client/pom.xml
--
diff --git a/phoenix-server-client/pom.xml b/phoenix-server-client/pom.xml
index 748e57c..3e54a07 100644
--- a/phoenix-server-client/pom.xml
+++ b/phoenix-server-client/pom.xml
@@ -24,6 +24,10 @@
 urlhttp://www.apache.org/url
   /organization
 
+  properties
+top.dir${project.basedir}/../top.dir
+  /properties
+
   build
 plugins
   plugin

http://git-wip-us.apache.org/repos/asf/phoenix/blob/29ea5035/phoenix-server/pom.xml
--
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index ab9a472..86b2525 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -24,6 +24,10 @@
 urlhttp://www.apache.org/url
   /organization
 
+  properties
+top.dir${project.basedir}/../top.dir
+  /properties
+
   build
 plugins
   plugin


[24/37] phoenix git commit: PHOENIX-1968: Should support saving arrays

2015-07-08 Thread apurtell
PHOENIX-1968: Should support saving arrays


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f7d73496
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f7d73496
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f7d73496

Branch: refs/heads/4.x-HBase-1.1
Commit: f7d734966f7172c3bc4a6f0ba31594ba74ee91a1
Parents: bfd860f
Author: ravimagham ravimag...@apache.org
Authored: Thu Jun 11 12:59:48 2015 -0700
Committer: ravimagham ravimag...@apache.org
Committed: Thu Jun 11 12:59:48 2015 -0700

--
 .../apache/phoenix/spark/PhoenixSparkIT.scala   | 21 
 .../phoenix/spark/PhoenixRecordWritable.scala   | 25 
 2 files changed, 41 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f7d73496/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git 
a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala 
b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index 42e8676..5f256e6 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -415,4 +415,25 @@ class PhoenixSparkIT extends FunSuite with Matchers with 
BeforeAndAfterAll {
 
 results.toList shouldEqual checkResults
   }
+
+  test(Can save arrays back to phoenix) {
+val dataSet = List((2L, Array(String1, String2, String3)))
+
+sc
+  .parallelize(dataSet)
+  .saveToPhoenix(
+ARRAY_TEST_TABLE,
+Seq(ID,VCARRAY),
+zkUrl = Some(quorumAddress)
+  )
+
+// Load the results back
+val stmt = conn.createStatement()
+val rs = stmt.executeQuery(SELECT VCARRAY FROM ARRAY_TEST_TABLE WHERE ID 
= 2)
+rs.next()
+val sqlArray = rs.getArray(1).getArray().asInstanceOf[Array[String]]
+
+// Verify the arrays are equal
+sqlArray shouldEqual dataSet(0)._2
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f7d73496/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
--
diff --git 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
index 67e0bd2..3977657 100644
--- 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
+++ 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRecordWritable.scala
@@ -16,11 +16,12 @@ package org.apache.phoenix.spark
 import java.sql.{PreparedStatement, ResultSet}
 import org.apache.hadoop.mapreduce.lib.db.DBWritable
 import org.apache.phoenix.mapreduce.util.ColumnInfoToStringEncoderDecoder
-import org.apache.phoenix.schema.types.{PDate, PhoenixArray}
+import org.apache.phoenix.schema.types.{PDataType, PDate, PhoenixArray}
 import org.joda.time.DateTime
 import scala.collection.{immutable, mutable}
 import scala.collection.JavaConversions._
 
+
 class PhoenixRecordWritable(var encodedColumns: String) extends DBWritable {
   val upsertValues = mutable.ArrayBuffer[Any]()
   val resultMap = mutable.Map[String, AnyRef]()
@@ -44,13 +45,27 @@ class PhoenixRecordWritable(var encodedColumns: String) 
extends DBWritable {
 upsertValues.zip(columns).zipWithIndex.foreach {
   case ((v, c), i) = {
 if (v != null) {
+
   // Both Java and Joda dates used to work in 4.2.3, but now they must 
be java.sql.Date
+  // Can override any other types here as needed
   val (finalObj, finalType) = v match {
-case dt: DateTime = (new java.sql.Date(dt.getMillis), 
PDate.INSTANCE.getSqlType)
-case d: java.util.Date = (new java.sql.Date(d.getTime), 
PDate.INSTANCE.getSqlType)
-case _ = (v, c.getSqlType)
+case dt: DateTime = (new java.sql.Date(dt.getMillis), 
PDate.INSTANCE)
+case d: java.util.Date = (new java.sql.Date(d.getTime), 
PDate.INSTANCE)
+case _ = (v, c.getPDataType)
+  }
+
+  // Save as array or object
+  finalObj match {
+case obj: Array[AnyRef] = {
+  // Create a java.sql.Array, need to lookup the base sql type name
+  val sqlArray = statement.getConnection.createArrayOf(
+PDataType.arrayBaseType(finalType).getSqlTypeName,
+obj
+  )
+  statement.setArray(i + 1, sqlArray)
+}
+case _ = statement.setObject(i + 1, finalObj)
   }
-  statement.setObject(i + 1, finalObj, finalType)
 } else {
   

[22/37] phoenix git commit: PHOENIX-1978 UDF ArgumentTypeMismatchException(Rajeshbabu)

2015-07-08 Thread apurtell
PHOENIX-1978 UDF ArgumentTypeMismatchException(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/18b9e727
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/18b9e727
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/18b9e727

Branch: refs/heads/4.x-HBase-1.1
Commit: 18b9e72756642e127b2e227ea46a4f70401e6187
Parents: 58ee706
Author: Rajeshbabu Chintaguntla rajeshb...@apache.org
Authored: Fri Jun 5 09:04:17 2015 +0530
Committer: Rajeshbabu Chintaguntla rajeshb...@apache.org
Committed: Fri Jun 5 09:04:17 2015 +0530

--
 .../phoenix/end2end/UserDefinedFunctionsIT.java | 58 ++--
 phoenix-core/src/main/antlr3/PhoenixSQL.g   | 17 +++---
 2 files changed, 61 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/18b9e727/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
index 868e19d..c6bd62f 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UserDefinedFunctionsIT.java
@@ -58,6 +58,8 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.FunctionAlreadyExistsException;
 import org.apache.phoenix.schema.FunctionNotFoundException;
 import org.apache.phoenix.schema.ValueRangeExcpetion;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PArrayDataType;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -121,11 +123,31 @@ public class UserDefinedFunctionsIT extends 
BaseOwnClusterIT{
 .append(
ptr.set(PInteger.INSTANCE.toBytes((Integer)sum));\n)
 .append(return true;\n)
 .append(}\n).toString();
-
+private static String ARRAY_INDEX_EVALUATE_METHOD =
+new StringBuffer()
+.append(public boolean evaluate(Tuple tuple, 
ImmutableBytesWritable ptr) {\n)
+.append(Expression indexExpr = 
children.get(1);\n)
+.append(if (!indexExpr.evaluate(tuple, ptr)) {\n)
+.append(   return false;\n)
+.append(} else if (ptr.getLength() == 0) {\n)
+.append(   return true;\n)
+.append(}\n)
+.append(// Use Codec to prevent Integer object 
allocation\n)
+.append(int index = 
PInteger.INSTANCE.getCodec().decodeInt(ptr, indexExpr.getSortOrder());\n)
+.append(if(index  0) {\n)
+.append(   throw new ParseException(\Index 
cannot be negative :\ + index);\n)
+.append(}\n)
+.append(Expression arrayExpr = 
children.get(0);\n)
+.append(return 
PArrayDataType.positionAtArrayElement(tuple, ptr, index, arrayExpr, 
getDataType(),getMaxLength());\n)
+.append(}\n).toString();
+
+
 private static String MY_REVERSE_CLASS_NAME = MyReverse;
 private static String MY_SUM_CLASS_NAME = MySum;
-private static String MY_REVERSE_PROGRAM = 
getProgram(MY_REVERSE_CLASS_NAME, STRING_REVERSE_EVALUATE_METHOD, PVarchar);
-private static String MY_SUM_PROGRAM = getProgram(MY_SUM_CLASS_NAME, 
SUM_COLUMN_VALUES_EVALUATE_METHOD, PInteger);
+private static String MY_ARRAY_INDEX_CLASS_NAME = MyArrayIndex;
+private static String MY_REVERSE_PROGRAM = 
getProgram(MY_REVERSE_CLASS_NAME, STRING_REVERSE_EVALUATE_METHOD, return 
PVarchar.INSTANCE;);
+private static String MY_SUM_PROGRAM = getProgram(MY_SUM_CLASS_NAME, 
SUM_COLUMN_VALUES_EVALUATE_METHOD, return PInteger.INSTANCE;);
+private static String MY_ARRAY_INDEX_PROGRAM = 
getProgram(MY_ARRAY_INDEX_CLASS_NAME, ARRAY_INDEX_EVALUATE_METHOD, return 
PDataType.fromTypeId(children.get(0).getDataType().getSqlType()- 
PDataType.ARRAY_TYPE_BASE););
 private static Properties EMPTY_PROPS = new Properties();
 
 
@@ -144,6 +166,8 @@ public class UserDefinedFunctionsIT extends 
BaseOwnClusterIT{
 .append(import org.apache.phoenix.schema.types.PInteger;\n)
 .append(import org.apache.phoenix.schema.types.PVarchar;\n)
 .append(import org.apache.phoenix.util.StringUtil;\n)
+.append(import 

[20/37] phoenix git commit: PHOENIX-777 - Support null value for fixed length ARRAY (Dumindu Buddhika)

2015-07-08 Thread apurtell
PHOENIX-777 - Support null value for fixed length ARRAY (Dumindu Buddhika)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6f890ade
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6f890ade
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6f890ade

Branch: refs/heads/4.x-HBase-1.1
Commit: 6f890ade0691d03469ff8fce81c2fa9edd6941af
Parents: 9c5f111
Author: ramkrishna ramkrishna.s.vasude...@gmail.com
Authored: Tue Jun 2 11:18:51 2015 +0530
Committer: ramkrishna ramkrishna.s.vasude...@gmail.com
Committed: Tue Jun 2 11:18:51 2015 +0530

--
 .../phoenix/end2end/ArraysWithNullsIT.java  | 300 +++
 .../phoenix/compile/ExpressionCompiler.java |   9 +-
 .../apache/phoenix/schema/types/PBinary.java|   2 +-
 .../org/apache/phoenix/schema/types/PChar.java  |   5 +-
 .../org/apache/phoenix/schema/types/PDate.java  |   6 +-
 .../apache/phoenix/schema/types/PDecimal.java   |   3 +
 .../apache/phoenix/schema/types/PTimestamp.java |  17 +-
 .../phoenix/schema/types/PhoenixArray.java  |  51 ++--
 8 files changed, 358 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6f890ade/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java
new file mode 100644
index 000..b034193
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArraysWithNullsIT.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+
+import java.sql.*;
+
+import org.apache.phoenix.schema.types.PTimestamp;
+import org.apache.phoenix.schema.types.PhoenixArray;
+import org.junit.Test;
+
+public class ArraysWithNullsIT extends BaseClientManagedTimeIT {
+
+@Test
+public void testArrayUpsertIntWithNulls() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(CREATE TABLE t1 ( k VARCHAR PRIMARY 
KEY, a INTEGER[]));
+
+PreparedStatement stmt = conn.prepareStatement(UPSERT INTO t1 
VALUES('a',ARRAY[null,3,null]));
+stmt.execute();
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery(Select a from t1 
where k = 'a');
+rs.next();
+Array array = conn.createArrayOf(INTEGER,new Object[]{null,3,null});
+
+assertEquals(rs.getArray(1),array);
+conn.close();
+
+}
+
+
+
+@Test
+public void testArrayUpsertVarcharWithNulls() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(CREATE TABLE t2 ( k VARCHAR PRIMARY 
KEY, a VARCHAR[]));
+
+PreparedStatement stmt = conn.prepareStatement(UPSERT INTO t2 
VALUES('a',ARRAY['10',null]));
+stmt.execute();
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery(Select a from t2 
where k = 'a');
+rs.next();
+Array array = conn.createArrayOf(VARCHAR,new Object[]{10,null});
+
+assertEquals(rs.getArray(1),array);
+conn.close();
+
+}
+
+@Test
+public void testArrayUpsertBigIntWithNulls() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(CREATE TABLE t3 ( k VARCHAR PRIMARY 
KEY, a BIGINT[]));
+
+PreparedStatement stmt = conn.prepareStatement(UPSERT INTO t3 
VALUES('a',ARRAY[2,null,32335,4]));
+stmt.execute();
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery(Select a from t3 
where k = 'a');
+rs.next();
+Array array = conn.createArrayOf(BIGINT,new 
Object[]{(long)2,null,(long)32335,(long)4});
+
+assertEquals(rs.getArray(1),array);
+conn.close();

[08/37] phoenix git commit: PHOENIX-2005 Connection utilities omit zk client port, parent znode (addendum)

2015-07-08 Thread apurtell
PHOENIX-2005 Connection utilities omit zk client port, parent znode (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5546a422
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5546a422
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5546a422

Branch: refs/heads/4.x-HBase-1.1
Commit: 5546a42226e3f0fdf0cc89f1c175ff3da7a75d8c
Parents: c6b37b9
Author: Nick Dimiduk ndimi...@apache.org
Authored: Tue May 26 17:41:04 2015 -0700
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Tue May 26 17:52:24 2015 -0700

--
 .../phoenix/jdbc/PhoenixEmbeddedDriver.java |  2 +-
 .../java/org/apache/phoenix/util/QueryUtil.java |  2 +-
 .../phoenix/jdbc/PhoenixEmbeddedDriverTest.java | 20 
 3 files changed, 22 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5546a422/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 2451603..3cfaacc 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -209,7 +209,7 @@ public abstract class PhoenixEmbeddedDriver implements 
Driver, org.apache.phoeni
 url = url == null ?  : url;
 url = url.startsWith(PhoenixRuntime.JDBC_PROTOCOL)
 ? url.substring(PhoenixRuntime.JDBC_PROTOCOL.length())
-: url;
+: PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + url;
 StringTokenizer tokenizer = new StringTokenizer(url, DELIMITERS, 
true);
 int nTokens = 0;
 String[] tokens = new String[5];

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5546a422/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
index bd38983..a2d4a91 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/QueryUtil.java
@@ -290,7 +290,7 @@ public final class QueryUtil {
 throws ClassNotFoundException,
 SQLException {
 String url = getConnectionUrl(props, conf);
-LOG.info(Creating connection with the jdbc url: + url);
+LOG.info(Creating connection with the jdbc url:  + url);
 PropertiesUtil.extractProperties(props, conf);
 return DriverManager.getConnection(url, props);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5546a422/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
index 083b205..4eda825 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriverTest.java
@@ -34,23 +34,33 @@ public class PhoenixEmbeddedDriverTest {
 @Test
 public void testGetConnectionInfo() throws SQLException {
 String[] urls = new String[] {
+null,
+,
 jdbc:phoenix,
 jdbc:phoenix;test=true,
 jdbc:phoenix:localhost,
+localhost,
+localhost;,
 jdbc:phoenix:localhost:123,
 jdbc:phoenix:localhost:123;foo=bar,
+localhost:123,
 jdbc:phoenix:localhost:123:/hbase,
 jdbc:phoenix:localhost:123:/foo-bar,
 jdbc:phoenix:localhost:123:/foo-bar;foo=bas,
+localhost:123:/foo-bar,
 jdbc:phoenix:localhost:/hbase,
 jdbc:phoenix:localhost:/foo-bar,
 jdbc:phoenix:localhost:/foo-bar;test=true,
+localhost:/foo-bar,
 jdbc:phoenix:v1,v2,v3,
 jdbc:phoenix:v1,v2,v3;,
 jdbc:phoenix:v1,v2,v3;test=true,
+v1,v2,v3,
 jdbc:phoenix:v1,v2,v3:/hbase,
 jdbc:phoenix:v1,v2,v3:/hbase;test=true,
+v1,v2,v3:/foo-bar,
 jdbc:phoenix:v1,v2,v3:123:/hbase,
+v1,v2,v3:123:/hbase,
 jdbc:phoenix:v1,v2,v3:123:/hbase;test=false,
 

[19/37] phoenix git commit: PHOENIX-2012 RowKeyComparisonFilter logs unencoded data at DEBUG level

2015-07-08 Thread apurtell
PHOENIX-2012 RowKeyComparisonFilter logs unencoded data at DEBUG level


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9c5f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9c5f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9c5f

Branch: refs/heads/4.x-HBase-1.1
Commit: 9c5fae456f3a0934e43e02af0ef5188b9337
Parents: 29ea503
Author: Nick Dimiduk ndimi...@apache.org
Authored: Wed May 27 15:58:32 2015 -0700
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Mon Jun 1 15:57:15 2015 -0700

--
 .../java/org/apache/phoenix/filter/RowKeyComparisonFilter.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9c5f/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
index 2e2037b..b7de7ac 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
@@ -73,8 +73,9 @@ public class RowKeyComparisonFilter extends 
BooleanExpressionFilter {
 if (evaluate) {
 inputTuple.setKey(v.getRowArray(), v.getRowOffset(), 
v.getRowLength());
 this.keepRow = Boolean.TRUE.equals(evaluate(inputTuple));
-if (logger.isDebugEnabled()) {
-logger.debug("RowKeyComparisonFilter: " + (this.keepRow ? 
"KEEP" : "FILTER") + " row " + inputTuple);
+if (logger.isTraceEnabled()) {
+logger.trace("RowKeyComparisonFilter: " + (this.keepRow ? 
"KEEP" : "FILTER")
++ " row " + inputTuple);
 }
 evaluate = false;
 }



[17/37] phoenix git commit: PHOENIX-1976 Exit gracefully if addShutdownHook fails.

2015-07-08 Thread apurtell
PHOENIX-1976 Exit gracefully if addShutdownHook fails.

If the JVM is already in the process of shutting down,
we don't need to add the shutdown hook for the PhoenixDriver
instance. Additionally, we shouldn't advertise this instance
either since we're going down.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f2be9138
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f2be9138
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f2be9138

Branch: refs/heads/4.x-HBase-1.1
Commit: f2be9138359b078fd3e285f3fd441de711789962
Parents: dc46b14
Author: Josh Elser josh.el...@gmail.com
Authored: Thu May 14 17:40:46 2015 -0400
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Mon Jun 1 12:02:28 2015 -0700

--
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  | 46 ++--
 1 file changed, 32 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f2be9138/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index 6360d06..cfabe82 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -60,25 +60,43 @@ public final class PhoenixDriver extends 
PhoenixEmbeddedDriver {
 private static volatile String driverShutdownMsg;
 static {
 try {
-DriverManager.registerDriver( INSTANCE = new PhoenixDriver() );
-// Add shutdown hook to release any resources that were never 
closed
-// In theory not necessary, but it won't hurt anything
-Runtime.getRuntime().addShutdownHook(new Thread() {
-@Override
-public void run() {
-try {
-INSTANCE.close();
-} catch (SQLException e) {
-logger.warn("Unable to close PhoenixDriver on 
shutdown", e);
-} finally {
-driverShutdownMsg = "Phoenix driver closed because 
server is shutting down";
+INSTANCE = new PhoenixDriver();
+try {
+// Add shutdown hook to release any resources that were never 
closed
+// In theory not necessary, but it won't hurt anything
+Runtime.getRuntime().addShutdownHook(new Thread() {
+@Override
+public void run() {
+closeInstance(INSTANCE);
 }
-}
-});
+});
+
+// Only register the driver when we successfully register the 
shutdown hook
+// Don't want to register it if we're already in the process 
of going down.
+DriverManager.registerDriver( INSTANCE );
+} catch (IllegalStateException e) {
+logger.warn("Failed to register PhoenixDriver shutdown hook as 
the JVM is already shutting down");
+
+// Close the instance now because we don't have the shutdown 
hook
+closeInstance(INSTANCE);
+
+throw e;
+}
 } catch (SQLException e) {
 throw new IllegalStateException("Unable to register " + 
PhoenixDriver.class.getName() + ": " + e.getMessage());
 }
 }
+
+private static void closeInstance(PhoenixDriver instance) {
+try {
+instance.close();
+} catch (SQLException e) {
+logger.warn("Unable to close PhoenixDriver on shutdown", e);
+} finally {
+driverShutdownMsg = "Phoenix driver closed because server is 
shutting down";
+}
+}
+
 // One entry per cluster here
 private final ConcurrentMapConnectionInfo,ConnectionQueryServices 
connectionQueryServicesMap = new 
ConcurrentHashMapConnectionInfo,ConnectionQueryServices(3);
 



[04/37] phoenix git commit: PHOENIX-1763 Support building with HBase-1.1.0

2015-07-08 Thread apurtell
PHOENIX-1763 Support building with HBase-1.1.0


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/98271b88
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/98271b88
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/98271b88

Branch: refs/heads/4.x-HBase-1.1
Commit: 98271b888c113f10e174205434e05d3b36b7eb67
Parents: bf01eb2
Author: Enis Soztutar e...@apache.org
Authored: Thu May 21 23:08:26 2015 -0700
Committer: Enis Soztutar e...@apache.org
Committed: Fri May 22 00:30:56 2015 -0700

--
 phoenix-core/pom.xml| 17 +++--
 .../regionserver/IndexHalfStoreFileReader.java  | 31 ++--
 .../regionserver/IndexSplitTransaction.java | 39 --
 .../hbase/regionserver/LocalIndexMerger.java|  3 +-
 .../cache/aggcache/SpillableGroupByCache.java   | 13 +++-
 .../phoenix/coprocessor/BaseRegionScanner.java  | 12 +--
 .../coprocessor/BaseScannerRegionObserver.java  | 77 +++-
 .../coprocessor/DelegateRegionScanner.java  | 23 --
 .../GroupedAggregateRegionObserver.java | 53 --
 .../coprocessor/HashJoinRegionScanner.java  | 60 ---
 .../coprocessor/MetaDataRegionObserver.java | 23 +++---
 .../phoenix/coprocessor/ScanRegionObserver.java | 11 ++-
 .../UngroupedAggregateRegionObserver.java   | 55 +++---
 .../hbase/index/covered/data/LocalTable.java|  2 +-
 .../index/covered/filter/FamilyOnlyFilter.java  |  6 +-
 .../index/scanner/FilteredKeyValueScanner.java  |  2 +-
 .../phoenix/index/PhoenixIndexBuilder.java  |  6 +-
 .../iterate/RegionScannerResultIterator.java|  9 ++-
 .../phoenix/schema/stats/StatisticsScanner.java | 10 ++-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |  6 +-
 .../index/covered/TestLocalTableState.java  |  1 -
 .../covered/filter/TestFamilyOnlyFilter.java| 12 +--
 .../index/write/TestWALRecoveryCaching.java |  4 +-
 phoenix-flume/pom.xml   |  9 ---
 phoenix-pig/pom.xml | 31 +---
 phoenix-spark/pom.xml   |  7 ++
 pom.xml | 41 ++-
 27 files changed, 361 insertions(+), 202 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/98271b88/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 45b8d73..22e6b60 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -350,16 +350,25 @@
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-it/artifactId
-  version${hbase.version}/version
   typetest-jar/type
   scopetest/scope
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
+  artifactIdhbase-annotations/artifactId
+/dependency
+dependency
+  groupIdorg.apache.hbase/groupId
   artifactIdhbase-common/artifactId
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
+  artifactIdhbase-common/artifactId
+  scopetest/scope
+  typetest-jar/type
+/dependency
+dependency
+  groupIdorg.apache.hbase/groupId
   artifactIdhbase-protocol/artifactId
 /dependency
 dependency
@@ -369,18 +378,16 @@
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-server/artifactId
-  version${hbase.version}/version
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-server/artifactId
-  version${hbase.version}/version
   typetest-jar/type
+  scopetest/scope
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-hadoop-compat/artifactId
-  scopetest/scope
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
@@ -391,13 +398,11 @@
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-hadoop2-compat/artifactId
-  version${hbase.version}/version
   scopetest/scope
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-hadoop2-compat/artifactId
-  version${hbase.version}/version
   typetest-jar/type
   scopetest/scope
 /dependency

http://git-wip-us.apache.org/repos/asf/phoenix/blob/98271b88/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 49e2022..9befc8c 100644
--- 

[23/37] phoenix git commit: PHOENIX-2027 Subqueries with no data are raising IllegalStateException(Alicia Ying Shu)

2015-07-08 Thread apurtell
PHOENIX-2027 Subqueries with no data are raising IllegalStateException(Alicia 
Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bfd860ff
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bfd860ff
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bfd860ff

Branch: refs/heads/4.x-HBase-1.1
Commit: bfd860ffec62a784f1229997cf98892ea3c0592d
Parents: 18b9e72
Author: Rajeshbabu Chintaguntla rajeshb...@apache.org
Authored: Wed Jun 10 01:01:29 2015 +0530
Committer: Rajeshbabu Chintaguntla rajeshb...@apache.org
Committed: Wed Jun 10 01:01:29 2015 +0530

--
 .../apache/phoenix/end2end/SortMergeJoinIT.java | 54 
 .../phoenix/execute/SortMergeJoinPlan.java  |  4 +-
 2 files changed, 56 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bfd860ff/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
index 6f14a45..8b65ab3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SortMergeJoinIT.java
@@ -2658,5 +2658,59 @@ public class SortMergeJoinIT extends 
BaseHBaseManagedTimeIT {
 }
 }
 
+@Test
+public void testSubqueryWithoutData() throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(false);
+
+try {
+String GRAMMAR_TABLE = CREATE TABLE IF NOT EXISTS GRAMMAR_TABLE 
(ID INTEGER PRIMARY KEY,  +
+unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id 
UNSIGNED_LONG, tiny_id TINYINT, +
+unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, 
unsig_small_id UNSIGNED_SMALLINT, + 
+float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id 
DOUBLE, unsig_double_id UNSIGNED_DOUBLE, + 
+decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, 
date_id DATE, timestamp_id TIMESTAMP, + 
+unsig_time_id TIME, unsig_date_id DATE, 
unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30), + 
+char_id CHAR (30), binary_id BINARY (100), varbinary_id 
VARBINARY (100));
+
+String LARGE_TABLE = CREATE TABLE IF NOT EXISTS LARGE_TABLE (ID 
INTEGER PRIMARY KEY,  +
+unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id 
UNSIGNED_LONG, tiny_id TINYINT, +
+unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, 
unsig_small_id UNSIGNED_SMALLINT, + 
+float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id 
DOUBLE, unsig_double_id UNSIGNED_DOUBLE, + 
+decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, 
date_id DATE, timestamp_id TIMESTAMP, + 
+unsig_time_id TIME, unsig_date_id DATE, 
unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30), + 
+char_id CHAR (30), binary_id BINARY (100), varbinary_id 
VARBINARY (100));
+
+String SECONDARY_LARGE_TABLE = CREATE TABLE IF NOT EXISTS 
SECONDARY_LARGE_TABLE (SEC_ID INTEGER PRIMARY KEY, +
+sec_unsig_id UNSIGNED_INT, sec_big_id BIGINT, 
sec_usnig_long_id UNSIGNED_LONG, sec_tiny_id TINYINT, + 
+sec_unsig_tiny_id UNSIGNED_TINYINT, sec_small_id 
SMALLINT, sec_unsig_small_id UNSIGNED_SMALLINT, + 
+sec_float_id FLOAT, sec_unsig_float_id UNSIGNED_FLOAT, 
sec_double_id DOUBLE, sec_unsig_double_id UNSIGNED_DOUBLE, +
+sec_decimal_id DECIMAL, sec_boolean_id BOOLEAN, 
sec_time_id TIME, sec_date_id DATE, +
+sec_timestamp_id TIMESTAMP, sec_unsig_time_id TIME, 
sec_unsig_date_id DATE, sec_unsig_timestamp_id TIMESTAMP, +
+sec_varchar_id VARCHAR (30), sec_char_id CHAR (30), 
sec_binary_id BINARY (100), sec_varbinary_id VARBINARY (100));
+createTestTable(getUrl(), GRAMMAR_TABLE);
+createTestTable(getUrl(), LARGE_TABLE);
+createTestTable(getUrl(), SECONDARY_LARGE_TABLE);
+
+String ddl = SELECT /*+USE_SORT_MERGE_JOIN*/ * FROM (SELECT ID, 
BIG_ID, DATE_ID FROM LARGE_TABLE AS A WHERE (A.ID % 5) = 0) AS A  +
+INNER JOIN (SELECT SEC_ID, SEC_TINY_ID, 
SEC_UNSIG_FLOAT_ID FROM SECONDARY_LARGE_TABLE AS B WHERE (B.SEC_ID % 5) = 0) AS 
B  + 
+ON A.ID=B.SEC_ID WHERE A.DATE_ID  ALL (SELECT 
SEC_DATE_ID FROM SECONDARY_LARGE_TABLE LIMIT 100)  +  
+AND 

[29/37] phoenix git commit: PHOENIX-1941 Phoenix tests are failing in linux env with missing class: StaticMapping (Alicia Ying Shu)

2015-07-08 Thread apurtell
PHOENIX-1941 Phoenix tests are failing in linux env with missing class: 
StaticMapping (Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/329d7494
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/329d7494
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/329d7494

Branch: refs/heads/4.x-HBase-1.1
Commit: 329d74948521ed974593e455369a27d9cd705249
Parents: 52f5b04
Author: Nick Dimiduk ndimi...@apache.org
Authored: Wed Jun 17 12:17:33 2015 -0700
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Wed Jun 17 12:23:47 2015 -0700

--
 .../phoenix/end2end/End2EndTestDriver.java   | 19 +++
 1 file changed, 15 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/329d7494/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
index 26d18cf..743f729 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/End2EndTestDriver.java
@@ -21,6 +21,7 @@ package org.apache.phoenix.end2end;
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.lang.annotation.Annotation;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -79,10 +80,20 @@ public class End2EndTestDriver extends AbstractHBaseTool {
 
   @Override
   public boolean isCandidateClass(Class? c) {
-return testFilterRe.matcher(c.getName()).find() 
-  // Our pattern will match the below NON-IntegrationTest. Rather than
-  // do exotic regex, just filter it out here
-  super.isCandidateClass(c);
+  Annotation[] annotations = c.getAnnotations();
+  for (Annotation curAnnotation : annotations) {
+  if 
(curAnnotation.toString().contains("NeedsOwnMiniClusterTest")) {
+  /* Skip tests that aren't designed to run against a live 
cluster.
+   * For a live cluster, we cannot bring it up and down as 
required
+   * for these tests to run.
+   */
+  return false;
+  }
+  }
+  return testFilterRe.matcher(c.getName()).find() 
+  // Our pattern will match the below NON-IntegrationTest. 
Rather than
+  // do exotic regex, just filter it out here
+  super.isCandidateClass(c);
   }
 }
 



[21/37] phoenix git commit: PHOENIX-777 - Support null value for fixed length ARRAY - Addendum (Ram)

2015-07-08 Thread apurtell
PHOENIX-777 - Support null value for fixed length ARRAY - Addendum (Ram)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/58ee7062
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/58ee7062
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/58ee7062

Branch: refs/heads/4.x-HBase-1.1
Commit: 58ee7062c624dd72a5cdaa41ec5b107a1e7b14c2
Parents: 6f890ad
Author: ramkrishna ramkrishna.s.vasude...@gmail.com
Authored: Tue Jun 2 14:32:02 2015 +0530
Committer: ramkrishna ramkrishna.s.vasude...@gmail.com
Committed: Tue Jun 2 14:36:05 2015 +0530

--
 .../main/java/org/apache/phoenix/schema/types/PTimestamp.java   | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/58ee7062/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
index d396adc..16b110e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PTimestamp.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.DateUtil;
 
 public class PTimestamp extends PDataTypeTimestamp {
@@ -47,6 +48,10 @@ public class PTimestamp extends PDataTypeTimestamp {
   @Override
   public int toBytes(Object object, byte[] bytes, int offset) {
 if (object == null) {
+  // Create the byte[] of size MAX_TIMESTAMP_BYTES
+  if(bytes.length != getByteSize()) {
+  bytes = Bytes.padTail(bytes, (getByteSize() - bytes.length));
+  }
   PDate.INSTANCE.getCodec().encodeLong(0l, bytes, offset);
   Bytes.putInt(bytes, offset + Bytes.SIZEOF_LONG, 0);
   return getByteSize();



[27/37] phoenix git commit: PHOENIX-2040 Mark spark/scala dependencies as 'provided' (Josh Mahonin)

2015-07-08 Thread apurtell
PHOENIX-2040 Mark spark/scala dependencies as 'provided' (Josh Mahonin)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/43c722ca
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/43c722ca
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/43c722ca

Branch: refs/heads/4.x-HBase-1.1
Commit: 43c722ca6d2d55347d1f2caf7641ce03339e1e1e
Parents: d0bcb7b
Author: Nick Dimiduk ndimi...@apache.org
Authored: Mon Jun 15 16:16:03 2015 -0700
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Mon Jun 15 16:16:30 2015 -0700

--
 phoenix-assembly/pom.xml |  4 
 phoenix-spark/pom.xml| 51 ---
 2 files changed, 32 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/43c722ca/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index ebc5d71..d275d03 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -152,6 +152,10 @@
 /dependency
 dependency
   groupIdorg.apache.phoenix/groupId
+  artifactIdphoenix-spark/artifactId
+/dependency
+dependency
+  groupIdorg.apache.phoenix/groupId
   artifactIdphoenix-server/artifactId
 /dependency
 dependency

http://git-wip-us.apache.org/repos/asf/phoenix/blob/43c722ca/phoenix-spark/pom.xml
--
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 1747573..aea5c7e 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -45,12 +45,7 @@
   groupIdorg.apache.phoenix/groupId
   artifactIdphoenix-core/artifactId
 /dependency
-dependency
-  groupIdorg.apache.phoenix/groupId
-  artifactIdphoenix-core/artifactId
-  classifiertests/classifier
-  scopetest/scope
-/dependency
+
 !-- Force import of Spark's servlet API for unit tests --
 dependency
   groupIdjavax.servlet/groupId
@@ -59,16 +54,38 @@
   scopetest/scope
 /dependency
 
+!-- Mark Spark / Scala as provided --
 dependency
-  groupIdjunit/groupId
-  artifactIdjunit/artifactId
+  groupIdorg.scala-lang/groupId
+  artifactIdscala-library/artifactId
+  version${scala.version}/version
+  scopeprovided/scope
+/dependency
+dependency
+  groupIdorg.apache.spark/groupId
+  artifactIdspark-core_${scala.binary.version}/artifactId
+  version${spark.version}/version
+  scopeprovided/scope
+/dependency
+dependency
+  groupIdorg.apache.spark/groupId
+  artifactIdspark-sql_${scala.binary.version}/artifactId
+  version${spark.version}/version
+  scopeprovided/scope
+/dependency
+
+!-- Test dependencies --
+dependency
+  groupIdorg.apache.phoenix/groupId
+  artifactIdphoenix-core/artifactId
+  classifiertests/classifier
   scopetest/scope
 /dependency
 
 dependency
-  groupIdorg.scala-lang/groupId
-  artifactIdscala-library/artifactId
-  version${scala.version}/version
+  groupIdjunit/groupId
+  artifactIdjunit/artifactId
+  scopetest/scope
 /dependency
 
 dependency
@@ -86,18 +103,6 @@
 /dependency
 
 dependency
-  groupIdorg.apache.spark/groupId
-  artifactIdspark-core_${scala.binary.version}/artifactId
-  version${spark.version}/version
-/dependency
-
-dependency
-  groupIdorg.apache.spark/groupId
-  artifactIdspark-sql_${scala.binary.version}/artifactId
-  version${spark.version}/version
-/dependency
-
-dependency
   groupIdorg.apache.hadoop/groupId
   artifactIdhadoop-client/artifactId
   version${hadoop-two.version}/version



[15/37] phoenix git commit: PHOENIX-2007 java.sql.SQLException: Encountered exception in sub plan [0] execution(Alicia Ying Shu)

2015-07-08 Thread apurtell
PHOENIX-2007 java.sql.SQLException: Encountered exception in sub plan [0] 
execution(Alicia Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eb9452d5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eb9452d5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eb9452d5

Branch: refs/heads/4.x-HBase-1.1
Commit: eb9452d55068ff4574b48938aebba765c28caaaf
Parents: c1882ee
Author: Rajeshbabu Chintaguntla rajeshb...@apache.org
Authored: Mon Jun 1 21:05:24 2015 +0530
Committer: Rajeshbabu Chintaguntla rajeshb...@apache.org
Committed: Mon Jun 1 21:05:24 2015 +0530

--
 .../org/apache/phoenix/end2end/HashJoinIT.java  | 54 
 .../apache/phoenix/execute/HashJoinPlan.java|  7 +--
 2 files changed, 58 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb9452d5/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index a03204a..88e03ca 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -3813,6 +3813,60 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
 }
 }
 
+@Test
+public void testSubqueryWithoutData() throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(false);
+
+try {
+String GRAMMAR_TABLE = CREATE TABLE IF NOT EXISTS GRAMMAR_TABLE 
(ID INTEGER PRIMARY KEY,  +
+unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id 
UNSIGNED_LONG, tiny_id TINYINT, +
+unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, 
unsig_small_id UNSIGNED_SMALLINT, + 
+float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id 
DOUBLE, unsig_double_id UNSIGNED_DOUBLE, + 
+decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, 
date_id DATE, timestamp_id TIMESTAMP, + 
+unsig_time_id TIME, unsig_date_id DATE, 
unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30), + 
+char_id CHAR (30), binary_id BINARY (100), varbinary_id 
VARBINARY (100));
+
+String LARGE_TABLE = CREATE TABLE IF NOT EXISTS LARGE_TABLE (ID 
INTEGER PRIMARY KEY,  +
+unsig_id UNSIGNED_INT, big_id BIGINT, unsig_long_id 
UNSIGNED_LONG, tiny_id TINYINT, +
+unsig_tiny_id UNSIGNED_TINYINT, small_id SMALLINT, 
unsig_small_id UNSIGNED_SMALLINT, + 
+float_id FLOAT, unsig_float_id UNSIGNED_FLOAT, double_id 
DOUBLE, unsig_double_id UNSIGNED_DOUBLE, + 
+decimal_id DECIMAL, boolean_id BOOLEAN, time_id TIME, 
date_id DATE, timestamp_id TIMESTAMP, + 
+unsig_time_id TIME, unsig_date_id DATE, 
unsig_timestamp_id TIMESTAMP, varchar_id VARCHAR (30), + 
+char_id CHAR (30), binary_id BINARY (100), varbinary_id 
VARBINARY (100));
+
+String SECONDARY_LARGE_TABLE = CREATE TABLE IF NOT EXISTS 
SECONDARY_LARGE_TABLE (SEC_ID INTEGER PRIMARY KEY, +
+sec_unsig_id UNSIGNED_INT, sec_big_id BIGINT, 
sec_usnig_long_id UNSIGNED_LONG, sec_tiny_id TINYINT, + 
+sec_unsig_tiny_id UNSIGNED_TINYINT, sec_small_id 
SMALLINT, sec_unsig_small_id UNSIGNED_SMALLINT, + 
+sec_float_id FLOAT, sec_unsig_float_id UNSIGNED_FLOAT, 
sec_double_id DOUBLE, sec_unsig_double_id UNSIGNED_DOUBLE, +
+sec_decimal_id DECIMAL, sec_boolean_id BOOLEAN, 
sec_time_id TIME, sec_date_id DATE, +
+sec_timestamp_id TIMESTAMP, sec_unsig_time_id TIME, 
sec_unsig_date_id DATE, sec_unsig_timestamp_id TIMESTAMP, +
+sec_varchar_id VARCHAR (30), sec_char_id CHAR (30), 
sec_binary_id BINARY (100), sec_varbinary_id VARBINARY (100));
+createTestTable(getUrl(), GRAMMAR_TABLE);
+createTestTable(getUrl(), LARGE_TABLE);
+createTestTable(getUrl(), SECONDARY_LARGE_TABLE);
+
+String ddl = SELECT * FROM (SELECT ID, BIG_ID, DATE_ID FROM 
LARGE_TABLE AS A WHERE (A.ID % 5) = 0) AS A  +
+INNER JOIN (SELECT SEC_ID, SEC_TINY_ID, 
SEC_UNSIG_FLOAT_ID FROM SECONDARY_LARGE_TABLE AS B WHERE (B.SEC_ID % 5) = 0) AS 
B  + 
+ON A.ID=B.SEC_ID WHERE A.DATE_ID  ALL (SELECT 
SEC_DATE_ID FROM SECONDARY_LARGE_TABLE LIMIT 100)  +  
+AND B.SEC_UNSIG_FLOAT_ID = ANY (SELECT sec_unsig_float_id 
FROM 

[32/37] phoenix git commit: PHOENIX-1981 : PhoenixHBase Load and Store Funcs should handle all Pig data types

2015-07-08 Thread apurtell
PHOENIX-1981 : PhoenixHBase Load and Store Funcs should handle all Pig data 
types


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e83dc203
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e83dc203
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e83dc203

Branch: refs/heads/4.x-HBase-1.1
Commit: e83dc203684ea03aae950e660c27251b853902bc
Parents: 5ea65c8
Author: Prashant Kommireddi 
pkommire...@pkommireddi-ltm.internal.salesforce.com
Authored: Mon May 18 19:47:01 2015 -0700
Committer: Eli Levine elilev...@apache.org
Committed: Wed Jun 17 13:09:57 2015 -0700

--
 .../org/apache/phoenix/pig/util/TypeUtil.java   | 24 ++--
 .../apache/phoenix/pig/util/TypeUtilTest.java   | 20 
 2 files changed, 37 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e83dc203/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
--
diff --git 
a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java 
b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
index 6549445..c8bc9d8 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
@@ -1,11 +1,21 @@
 /*
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license agreements. See the NOTICE
- * file distributed with this work for additional information regarding 
copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the License); you may not 
use this file except in compliance with the
- * License. You may obtain a copy of the License at 
http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the 
License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language
- * governing permissions and limitations under the License.
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
  */
 
 package org.apache.phoenix.pig.util;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e83dc203/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
--
diff --git 
a/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java 
b/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
index 25d9f48..56167f6 100644
--- a/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
+++ b/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
@@ -1,3 +1,23 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ *distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you maynot use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicablelaw or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.phoenix.pig.util;
 
 import static org.junit.Assert.assertEquals;



[37/37] phoenix git commit: PHOENIX-2095 Lower the default for phoenix.sequence.saltBuckets

2015-07-08 Thread apurtell
PHOENIX-2095 Lower the default for phoenix.sequence.saltBuckets


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/18e52cc4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/18e52cc4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/18e52cc4

Branch: refs/heads/4.x-HBase-0.98
Commit: 18e52cc4ce2384bdc7a9c72d63901058e40f04ae
Parents: b82c5cb
Author: Andrew Purtell apurt...@apache.org
Authored: Wed Jul 8 09:34:20 2015 -0700
Committer: Andrew Purtell apurt...@apache.org
Committed: Wed Jul 8 09:35:33 2015 -0700

--
 .../main/java/org/apache/phoenix/query/QueryServicesOptions.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/18e52cc4/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 4e8879b..79776e7 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -173,7 +173,7 @@ public class QueryServicesOptions {
 /**
  * Use only first time SYSTEM.SEQUENCE table is created.
  */
-public static final int DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS = 
SaltingUtil.MAX_BUCKET_NUM;
+public static final int DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS = 0;
 /**
  * Default value for coprocessor priority is between SYSTEM and USER 
priority.
  */



[12/37] phoenix git commit: PHOENIX-1939 Tests are failing with DoNotRetryIOException: ATABLE: null (Alicia Ying Shu)

2015-07-08 Thread apurtell
PHOENIX-1939 Tests are failing with DoNotRetryIOException: ATABLE: null (Alicia 
Ying Shu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a600cc4d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a600cc4d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a600cc4d

Branch: refs/heads/4.x-HBase-1.1
Commit: a600cc4d7acc2c828ae7782e59d094f99e5631f0
Parents: c95e28d
Author: Nick Dimiduk ndimi...@apache.org
Authored: Fri May 29 17:12:25 2015 -0700
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Fri May 29 17:13:08 2015 -0700

--
 .../src/main/java/org/apache/phoenix/schema/PTableImpl.java  | 4 ++--
 .../src/test/java/org/apache/phoenix/query/BaseTest.java | 3 ++-
 2 files changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a600cc4d/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index bf4420c..bdc95b8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -88,8 +88,8 @@ public class PTableImpl implements PTable {
 
 private PTableKey key;
 private PName name;
-private PName schemaName;
-private PName tableName;
+private PName schemaName = PName.EMPTY_NAME;
+private PName tableName = PName.EMPTY_NAME;
 private PName tenantId;
 private PTableType type;
 private PIndexState state;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a600cc4d/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
--
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 54ae670..b0574c3 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -121,7 +121,6 @@ import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.LocalIndexMerger;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -829,6 +828,7 @@ public abstract class BaseTest {
 logger.info(Table  + fullTableName +  is already 
deleted.);
 }
 }
+rs.close();
 if (lastTenantId != null) {
 conn.close();
 }
@@ -860,6 +860,7 @@ public abstract class BaseTest {
 logger.info(DROP SEQUENCE STATEMENT: DROP SEQUENCE  + 
SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
 conn.createStatement().execute(DROP SEQUENCE  + 
SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
 }
+rs.close();
 }
 
 protected static void initSumDoubleValues(byte[][] splits, String url) 
throws Exception {



[34/37] phoenix git commit: minor changes based on Jesse's feedback

2015-07-08 Thread apurtell
minor changes based on Jesse's feedback


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fbd2922f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fbd2922f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fbd2922f

Branch: refs/heads/4.x-HBase-1.1
Commit: fbd2922f286fd4c1658281b62cc3f414c618106f
Parents: 6225047
Author: Prashant Kommireddi 
pkommire...@pkommireddi-ltm.internal.salesforce.com
Authored: Mon Jun 15 16:18:47 2015 -0700
Committer: Eli Levine elilev...@apache.org
Committed: Wed Jun 17 13:09:58 2015 -0700

--
 .../src/main/java/org/apache/phoenix/pig/util/TypeUtil.java   | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fbd2922f/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
--
diff --git 
a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java 
b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
index 6e32fb5..5820ec6 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
@@ -76,9 +76,7 @@ public final class TypeUtil {
 private TypeUtil() {}
 
 /**
- * A map of Phoenix to Pig data types.
- * 
- * @return
+ * @return map of Phoenix to Pig data types.
  */
 private static ImmutableMapPDataType, Byte init() {
 final ImmutableMap.BuilderPDataType, Byte builder = new 
BuilderPDataType, Byte();
@@ -160,7 +158,8 @@ public final class TypeUtil {
 
 /**
  * This method encodes a value with Phoenix data type. It begins with 
checking whether an object is BINARY and makes
- * a call to {@link #castBytes(Object, PDataType)} to convery bytes to 
targetPhoenixType
+ * a call to {@link #castBytes(Object, PDataType)} to convert bytes to 
targetPhoenixType. It returns a {@link RuntimeException}
+ * when object can not be coerced.
  * 
  * @param o
  * @param targetPhoenixType



[11/37] phoenix git commit: PHOENIX-1964 - porting from master

2015-07-08 Thread apurtell
PHOENIX-1964 - porting from master


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c95e28df
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c95e28df
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c95e28df

Branch: refs/heads/4.x-HBase-1.1
Commit: c95e28df94241f47d5cfe9a1515b21960c93adf2
Parents: 0e0b4dd
Author: cmarcel cmar...@salesforce.com
Authored: Wed May 27 13:58:45 2015 -0700
Committer: cmarcel cmar...@salesforce.com
Committed: Wed May 27 13:58:45 2015 -0700

--
 phoenix-pherf/config/pherf.properties   |  3 ++
 .../org/apache/phoenix/pherf/DataIngestIT.java  |  3 +-
 .../apache/phoenix/pherf/ResultBaseTestIT.java  | 45 ++
 .../java/org/apache/phoenix/pherf/Pherf.java|  7 +--
 .../apache/phoenix/pherf/PherfConstants.java| 50 +++-
 .../phoenix/pherf/loaddata/DataLoader.java  |  2 +-
 .../apache/phoenix/pherf/result/ResultUtil.java |  4 +-
 .../pherf/result/impl/CSVResultHandler.java |  5 +-
 .../pherf/result/impl/ImageResultHandler.java   |  5 +-
 .../pherf/result/impl/XMLResultHandler.java |  6 ++-
 .../apache/phoenix/pherf/util/ResourceList.java | 26 --
 .../pherf/workload/WorkloadExecutor.java|  2 +-
 .../phoenix/pherf/ConfigurationParserTest.java  |  2 +-
 .../org/apache/phoenix/pherf/ResourceTest.java  |  8 ++--
 .../apache/phoenix/pherf/ResultBaseTest.java| 44 +
 .../org/apache/phoenix/pherf/ResultTest.java|  5 +-
 16 files changed, 168 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c95e28df/phoenix-pherf/config/pherf.properties
--
diff --git a/phoenix-pherf/config/pherf.properties 
b/phoenix-pherf/config/pherf.properties
index 354707a..1142f9b5 100644
--- a/phoenix-pherf/config/pherf.properties
+++ b/phoenix-pherf/config/pherf.properties
@@ -29,3 +29,6 @@ pherf.default.dataloader.threadpool=0
 # When upserting, this is the max # of rows that will be inserted in a single 
commit
 pherf.default.dataloader.batchsize=1000
 
+# Directory where results from a scenario run will be written
+pherf.default.results.dir=RESULTS
+

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c95e28df/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
--
diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
index b29656d..2b56f43 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/DataIngestIT.java
@@ -18,7 +18,6 @@
 
 package org.apache.phoenix.pherf;
 
-import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
 import org.apache.phoenix.pherf.configuration.Column;
 import org.apache.phoenix.pherf.configuration.DataTypeMapping;
 import org.apache.phoenix.pherf.configuration.Scenario;
@@ -39,7 +38,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-public class DataIngestIT extends BaseHBaseManagedTimeIT {
+public class DataIngestIT extends ResultBaseTestIT {
 protected static PhoenixUtil util = new PhoenixUtil(true);
 static final String matcherScenario = .*scenario/.*test.*xml;
 static final String matcherSchema = .*datamodel/.*test.*sql;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c95e28df/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java
--
diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java
new file mode 100644
index 000..6e103b8
--- /dev/null
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/ResultBaseTestIT.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *   or more contributor license agreements.  See the NOTICE file
+ *   distributed with this work for additional information
+ *   regarding copyright ownership.  The ASF licenses this file
+ *   to you under the Apache License, Version 2.0 (the
+ *   License); you may not use this file except in compliance
+ *   with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an AS IS BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing 

[25/37] phoenix git commit: PHOENIX-2033 PQS log environment details on launch

2015-07-08 Thread apurtell
PHOENIX-2033 PQS log environment details on launch


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e64f61ba
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e64f61ba
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e64f61ba

Branch: refs/heads/4.x-HBase-1.1
Commit: e64f61ba431b8db938bf60992bbde56f4c540946
Parents: f7d7349
Author: Nick Dimiduk ndimi...@apache.org
Authored: Tue Jun 9 17:12:21 2015 -0700
Committer: Nick Dimiduk ndimi...@apache.org
Committed: Fri Jun 12 09:38:42 2015 -0700

--
 .../apache/phoenix/queryserver/server/Main.java | 69 
 1 file changed, 69 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e64f61ba/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
--
diff --git 
a/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java 
b/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
index 55febc5..9f9bfc7 100644
--- 
a/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
+++ 
b/phoenix-server/src/main/java/org/apache/phoenix/queryserver/server/Main.java
@@ -34,7 +34,12 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
 import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
@@ -50,6 +55,11 @@ public final class Main extends Configured implements Tool, 
Runnable {
   phoenix.queryserver.http.port;
   public static final int DEFAULT_HTTP_PORT = 8765;
 
+  public static final String QUERY_SERVER_ENV_LOGGING_KEY =
+  phoenix.queryserver.envvars.logging.disabled;
+  public static final String QUERY_SERVER_ENV_LOGGING_SKIPWORDS_KEY =
+  phoenix.queryserver.envvars.logging.skipwords;
+
   public static final String KEYTAB_FILENAME_KEY = 
phoenix.queryserver.keytab.file;
   public static final String KERBEROS_PRINCIPAL_KEY = 
phoenix.queryserver.kerberos.principal;
   public static final String DNS_NAMESERVER_KEY = 
phoenix.queryserver.dns.nameserver;
@@ -58,12 +68,70 @@ public final class Main extends Configured implements Tool, 
Runnable {
 
   protected static final Log LOG = LogFactory.getLog(Main.class);
 
+  @SuppressWarnings(serial)
+  private static final SetString DEFAULT_SKIP_WORDS = new HashSetString() {
+{
+  add(secret);
+  add(passwd);
+  add(password);
+  add(credential);
+}
+  };
+
   private final String[] argv;
   private final CountDownLatch runningLatch = new CountDownLatch(1);
   private HttpServer server = null;
   private int retCode = 0;
   private Throwable t = null;
 
+  /**
+   * Log information about the currently running JVM.
+   */
+  public static void logJVMInfo() {
+// Print out vm stats before starting up.
+RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
+if (runtime != null) {
+  LOG.info(vmName= + runtime.getVmName() + , vmVendor= +
+  runtime.getVmVendor() + , vmVersion= + runtime.getVmVersion());
+  LOG.info(vmInputArguments= + runtime.getInputArguments());
+}
+  }
+
+  /**
+   * Logs information about the currently running JVM process including
+   * the environment variables. Logging of env vars can be disabled by
+   * setting {@code phoenix.envvars.logging.disabled} to {@code true}.
+   * pIf enabled, you can also exclude environment variables containing
+   * certain substrings by setting {@code phoenix.envvars.logging.skipwords}
+   * to comma separated list of such substrings.
+   */
+  public static void logProcessInfo(Configuration conf) {
+// log environment variables unless asked not to
+if (conf == null || !conf.getBoolean(QUERY_SERVER_ENV_LOGGING_KEY, false)) 
{
+  SetString skipWords = new HashSetString(DEFAULT_SKIP_WORDS);
+  if (conf != null) {
+String[] confSkipWords = 
conf.getStrings(QUERY_SERVER_ENV_LOGGING_SKIPWORDS_KEY);
+if (confSkipWords != null) {
+  skipWords.addAll(Arrays.asList(confSkipWords));
+}
+  }
+
+  nextEnv:
+  for (Map.EntryString, String entry : System.getenv().entrySet()) {
+String key = entry.getKey().toLowerCase();
+String value = entry.getValue().toLowerCase();
+// exclude variables which may contain skip words
+for(String skipWord : skipWords) {
+  if (key.contains(skipWord) || value.contains(skipWord))
+continue nextEnv;
+}
+LOG.info(env:+entry);
+  }
+}
+// and 

[33/37] phoenix git commit: PHOENIX-1981 : PhoenixHBase Load and Store Funcs should handle all Pig data types

2015-07-08 Thread apurtell
PHOENIX-1981 : PhoenixHBase Load and Store Funcs should handle all Pig data 
types


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/62250475
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/62250475
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/62250475

Branch: refs/heads/4.x-HBase-1.1
Commit: 62250475e741a72d8ba17e66593d5fdcebac2fa6
Parents: e83dc20
Author: Prashant Kommireddi 
pkommire...@pkommireddi-ltm.internal.salesforce.com
Authored: Mon May 18 19:48:30 2015 -0700
Committer: Eli Levine elilev...@apache.org
Committed: Wed Jun 17 13:09:57 2015 -0700

--
 .../src/main/java/org/apache/phoenix/pig/util/TypeUtil.java  | 8 +++-
 .../test/java/org/apache/phoenix/pig/util/TypeUtilTest.java  | 8 +++-
 2 files changed, 6 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/62250475/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
--
diff --git 
a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java 
b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
index c8bc9d8..6e32fb5 100644
--- a/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
+++ b/phoenix-pig/src/main/java/org/apache/phoenix/pig/util/TypeUtil.java
@@ -1,17 +1,15 @@
 /*
- * Copyright 2010 The Apache Software Foundation
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
- *distributed with this work for additional information
+ * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
  * to you under the Apache License, Version 2.0 (the
- * License); you maynot use this file except in compliance
+ * License); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
  * http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicablelaw or agreed to in writing, software
+ * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an AS IS BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and

http://git-wip-us.apache.org/repos/asf/phoenix/blob/62250475/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
--
diff --git 
a/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java 
b/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
index 56167f6..0b44d2b 100644
--- a/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
+++ b/phoenix-pig/src/test/java/org/apache/phoenix/pig/util/TypeUtilTest.java
@@ -1,17 +1,15 @@
 /*
- * Copyright 2010 The Apache Software Foundation
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
- *distributed with this work for additional information
+ * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
  * to you under the Apache License, Version 2.0 (the
- * License); you maynot use this file except in compliance
+ * License); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
  * http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicablelaw or agreed to in writing, software
+ * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an AS IS BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and



Git Push Summary

2015-07-08 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 [deleted] 2fd1a1ac5


[11/31] phoenix git commit: PHOENIX-1684 Functional Index using REGEXP_SUBSTR doesn't work correctly

2015-05-20 Thread apurtell
PHOENIX-1684 Functional Index using REGEXP_SUBSTR doesn't work correctly


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a8b27e3f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a8b27e3f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a8b27e3f

Branch: refs/heads/4.x-HBase-1.x
Commit: a8b27e3f010d15d3f3b519c38fbb052ebb4a6cdb
Parents: 250474d
Author: Thomas tdsi...@salesforce.com
Authored: Thu Mar 19 13:57:27 2015 -0700
Committer: Thomas tdsi...@salesforce.com
Committed: Tue Mar 24 15:56:25 2015 -0700

--
 .../end2end/index/IndexExpressionIT.java| 161 ++-
 .../phoenix/compile/PostIndexDDLCompiler.java   |   4 +-
 .../parse/IndexExpressionParseNodeRewriter.java |  30 +---
 .../apache/phoenix/schema/MetaDataClient.java   |   4 +-
 .../org/apache/phoenix/util/StringUtil.java |   5 +
 .../phoenix/compile/QueryCompilerTest.java  |  22 ++-
 6 files changed, 153 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a8b27e3f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
index 1e3733b..0203e35 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
@@ -1202,54 +1202,60 @@ public class IndexExpressionIT extends 
BaseHBaseManagedTimeIT {
 
 @Test
 public void testViewUsesTableIndex() throws Exception {
-ResultSet rs;
 Connection conn = DriverManager.getConnection(getUrl());
-String ddl = CREATE TABLE t (k1 INTEGER NOT NULL, k2 INTEGER NOT 
NULL, s1 VARCHAR, s2 VARCHAR, s3 VARCHAR, s4 VARCHAR CONSTRAINT pk PRIMARY KEY 
(k1, k2));
-conn.createStatement().execute(ddl);
-conn.createStatement().execute(CREATE INDEX i1 ON t(k2, s2, s3, s1));
-conn.createStatement().execute(CREATE INDEX i2 ON t(k2, s2||'_'||s3, 
s1, s4));
-
-ddl = CREATE VIEW v AS SELECT * FROM t WHERE s1 = 'foo';
-conn.createStatement().execute(ddl);
-conn.createStatement().execute(UPSERT INTO t 
VALUES(1,1,'foo','abc','cab'));
-conn.createStatement().execute(UPSERT INTO t 
VALUES(2,2,'bar','xyz','zyx'));
-conn.commit();
-
-rs = conn.createStatement().executeQuery(SELECT count(*) FROM v);
-assertTrue(rs.next());
-assertEquals(1, rs.getLong(1));
-assertFalse(rs.next());
-
-//i2 should be used since it contains s3||'_'||s4 i
-String query = SELECT s2||'_'||s3 FROM v WHERE k2=1 AND 
(s2||'_'||s3)='abc_cab';
-rs = conn.createStatement(  ).executeQuery(EXPLAIN  + query);
-String queryPlan = QueryUtil.getExplainPlan(rs);
-assertEquals(
-CLIENT PARALLEL 1-WAY RANGE SCAN OVER I2 
[1,'abc_cab','foo']\n + 
-SERVER FILTER BY FIRST KEY ONLY, queryPlan);
-rs = conn.createStatement().executeQuery(query);
-assertTrue(rs.next());
-assertEquals(abc_cab, rs.getString(1));
-assertFalse(rs.next());
-
-conn.createStatement().execute(ALTER VIEW v DROP COLUMN s4);
-//i2 cannot be used since s4 has been dropped from the view, so i1 
will be used 
-rs = conn.createStatement().executeQuery(EXPLAIN  + query);
-queryPlan = QueryUtil.getExplainPlan(rs);
-assertEquals(
-CLIENT PARALLEL 1-WAY RANGE SCAN OVER I1 [1]\n + 
-SERVER FILTER BY FIRST KEY ONLY AND ((\S2\ || '_' || 
\S3\) = 'abc_cab' AND \S1\ = 'foo'), queryPlan);
-rs = conn.createStatement().executeQuery(query);
-assertTrue(rs.next());
-assertEquals(abc_cab, rs.getString(1));
-assertFalse(rs.next());
+try 
+{
+   ResultSet rs;
+   String ddl = CREATE TABLE t (k1 INTEGER NOT NULL, k2 INTEGER 
NOT NULL, s1 VARCHAR, s2 VARCHAR, s3 VARCHAR, s4 VARCHAR CONSTRAINT pk PRIMARY 
KEY (k1, k2));
+   conn.createStatement().execute(ddl);
+   conn.createStatement().execute(CREATE INDEX i1 ON t(k2, s2, 
s3, s1));
+   conn.createStatement().execute(CREATE INDEX i2 ON t(k2, 
s2||'_'||s3, s1, s4));
+   
+   ddl = CREATE VIEW v AS SELECT * FROM t WHERE s1 = 'foo';
+   conn.createStatement().execute(ddl);
+   conn.createStatement().execute(UPSERT INTO t 
VALUES(1,1,'foo','abc','cab'));
+   conn.createStatement().execute(UPSERT INTO t 

[29/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

2015-05-20 Thread apurtell
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e9623da1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e9623da1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e9623da1

Branch: refs/heads/4.x-HBase-1.0
Commit: e9623da1747f00158a2a291d23ba989361f44162
Parents: deb4786
Author: Andrew Purtell apurt...@apache.org
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell apurt...@apache.org
Committed: Wed May 20 09:54:27 2015 -0700

--
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --
 .../covered/filter/TestFamilyOnlyFilter.java| 106 ---
 2 files changed, 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e9623da1/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 68555ef..000
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is 
reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-return done;
-  }
-
-  @Override
-  public void reset() {
-done = false;
-previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-if (done) {
-  return ReturnCode.SKIP;
-}
-ReturnCode code = super.filterKeyValue(v);
-if (previousMatchFound) {
-  // we found a match before, and now we are skipping the key because of 
the family, therefore
-  // we are done (no more of the family).
-  if (code.equals(ReturnCode.SKIP)) {
-  done = true;
-  }
-} else {
-  // if we haven't seen a match before, then it doesn't matter what we see 
now, except to mark
-  // if we've seen a match
-  if (code.equals(ReturnCode.INCLUDE)) {
-previousMatchFound = true;
-  }
-}
-return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e9623da1/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 216f548..000
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 

[14/31] phoenix git commit: Fix IndexExpressionIT test failures

2015-05-20 Thread apurtell
Fix IndexExpressionIT test failures


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8ea426ce
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8ea426ce
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8ea426ce

Branch: refs/heads/4.x-HBase-1.x
Commit: 8ea426ceb55d29c3c4f06489cdd0a6b87d69d68c
Parents: 4d71610
Author: Thomas D'Silva twdsi...@gmail.com
Authored: Thu Mar 26 12:45:20 2015 -0700
Committer: Thomas tdsi...@salesforce.com
Committed: Thu Mar 26 13:06:51 2015 -0700

--
 .../java/org/apache/phoenix/end2end/index/IndexExpressionIT.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8ea426ce/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
index 0203e35..1a5fbcc 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexExpressionIT.java
@@ -480,7 +480,7 @@ public class IndexExpressionIT extends 
BaseHBaseManagedTimeIT {
 String expectedPlan = CLIENT PARALLEL 1-WAY 
 + (localIndex ? RANGE SCAN OVER _LOCAL_IDX_ + 
fullDataTableName +  [-32768]
 : FULL SCAN OVER INDEX_TEST.IDX)
-+ \nSERVER FILTER BY FIRST KEY ONLY\nSERVER 
AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT((A.INT_COL1 + 
B.INT_COL2))]\nCLIENT MERGE SORT;
++ \nSERVER FILTER BY FIRST KEY ONLY\nSERVER 
AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT(\(A.INT_COL1 + 
B.INT_COL2)\)]\nCLIENT MERGE SORT;
 assertEquals(expectedPlan, QueryUtil.getExplainPlan(rs));
 rs = conn.createStatement().executeQuery(groupBySql);
 assertTrue(rs.next());
@@ -531,7 +531,7 @@ public class IndexExpressionIT extends 
BaseHBaseManagedTimeIT {
 String expectedPlan = CLIENT PARALLEL 1-WAY RANGE SCAN OVER 
 + (localIndex ? _LOCAL_IDX_ + fullDataTableName +  
[-32768,0] - [-32768,*]
 : INDEX_TEST.IDX [0] - [*])
-+ \nSERVER FILTER BY FIRST KEY ONLY\nSERVER 
AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT((A.INT_COL1 + 1))]\nCLIENT 
MERGE SORT;
++ \nSERVER FILTER BY FIRST KEY ONLY\nSERVER 
AGGREGATE INTO ORDERED DISTINCT ROWS BY [TO_BIGINT(\(A.INT_COL1 + 
1)\)]\nCLIENT MERGE SORT;
 assertEquals(expectedPlan, QueryUtil.getExplainPlan(rs));
 rs = conn.createStatement().executeQuery(sql);
 assertTrue(rs.next());



[17/31] phoenix git commit: PHOENIX-1457 Use high priority queue for metadata endpoint calls

2015-05-20 Thread apurtell
PHOENIX-1457 Use high priority queue for metadata endpoint calls


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f0c2ed4e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f0c2ed4e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f0c2ed4e

Branch: refs/heads/4.x-HBase-1.x
Commit: f0c2ed4e567eb4efc5a59d70d8880800b144fd09
Parents: 24ee2c6
Author: Thomas D'Silva twdsi...@gmail.com
Authored: Tue Mar 24 17:17:44 2015 -0700
Committer: Thomas tdsi...@salesforce.com
Committed: Fri Mar 27 11:54:40 2015 -0700

--
 .../phoenix/end2end/index/IndexHandlerIT.java   |  12 +-
 .../phoenix/end2end/index/IndexQosIT.java   | 242 ---
 .../apache/phoenix/rpc/PhoenixClientRpcIT.java  | 122 ++
 .../apache/phoenix/rpc/PhoenixServerRpcIT.java  | 235 ++
 .../TestPhoenixIndexRpcSchedulerFactory.java|  64 +
 .../hbase/ipc/PhoenixIndexRpcScheduler.java | 123 --
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java   | 123 ++
 .../hbase/ipc/PhoenixRpcSchedulerFactory.java   |  95 
 .../controller/ClientRpcControllerFactory.java  |  60 +
 .../ipc/controller/IndexRpcController.java  |  51 
 .../ipc/controller/MetadataRpcController.java   |  55 +
 .../controller/ServerRpcControllerFactory.java  |  62 +
 .../index/IndexQosRpcControllerFactory.java |  82 ---
 .../ipc/PhoenixIndexRpcSchedulerFactory.java|  90 ---
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   4 -
 .../org/apache/phoenix/query/QueryServices.java |   5 +-
 .../phoenix/query/QueryServicesOptions.java |  12 +-
 .../org/apache/phoenix/util/SchemaUtil.java |   7 -
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |  16 +-
 .../PhoenixIndexRpcSchedulerFactoryTest.java| 106 
 .../PhoenixRpcSchedulerFactoryTest.java | 125 ++
 .../java/org/apache/phoenix/query/BaseTest.java |  12 +-
 22 files changed, 1023 insertions(+), 680 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
index 1507d6b..20a780a 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexHandlerIT.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.DelegatingPayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.hbase.index.IndexQosRpcControllerFactory;
 import org.apache.phoenix.hbase.index.TableName;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.junit.After;
@@ -53,11 +53,11 @@ public class IndexHandlerIT {
 
 public static class CountingIndexClientRpcFactory extends 
RpcControllerFactory {
 
-private IndexQosRpcControllerFactory delegate;
+private ServerRpcControllerFactory delegate;
 
 public CountingIndexClientRpcFactory(Configuration conf) {
 super(conf);
-this.delegate = new IndexQosRpcControllerFactory(conf);
+this.delegate = new ServerRpcControllerFactory(conf);
 }
 
 @Override
@@ -146,8 +146,8 @@ public class IndexHandlerIT {
 conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
 CountingIndexClientRpcFactory.class.getName());
 // and set the index table as the current table
-conf.setStrings(IndexQosRpcControllerFactory.INDEX_TABLE_NAMES_KEY,
-TestTable.getTableNameString());
+//conf.setStrings(PhoenixRpcControllerFactory.INDEX_TABLE_NAMES_KEY,
+//TestTable.getTableNameString());
 HTable table = new HTable(conf, TestTable.getTableName());
 
 // do a write to the table
@@ -159,7 +159,7 @@ public class IndexHandlerIT {
 // check the counts on the rpc controller
 assertEquals(Didn't get the expected number of index priority 
writes!, 1,
 (int) CountingIndexClientRpcController.priorityCounts
-.get(QueryServicesOptions.DEFAULT_INDEX_MIN_PRIORITY));
+.get(QueryServicesOptions.DEFAULT_INDEX_PRIORITY));
 
 table.close();
 }


[07/31] phoenix git commit: PHOENIX-1676 Set priority of Index Updates correctly

2015-05-20 Thread apurtell
PHOENIX-1676 Set priority of Index Updates correctly


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8b0591ec
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8b0591ec
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8b0591ec

Branch: refs/heads/4.x-HBase-1.x
Commit: 8b0591ecd38ffa6a110f9fd5d9c8ce086d537e2c
Parents: 096586e
Author: Thomas tdsi...@salesforce.com
Authored: Mon Mar 23 22:17:16 2015 -0700
Committer: Thomas tdsi...@salesforce.com
Committed: Mon Mar 23 22:40:55 2015 -0700

--
 .../phoenix/end2end/index/IndexQosIT.java   | 240 +++
 .../hbase/ipc/PhoenixIndexRpcScheduler.java |   3 +
 .../phoenix/hbase/index/IndexQosCompat.java |  98 
 .../index/IndexQosRpcControllerFactory.java |  12 +-
 .../index/table/CoprocessorHTableFactory.java   |  20 --
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   7 +-
 .../org/apache/phoenix/util/SchemaUtil.java |   7 +
 7 files changed, 260 insertions(+), 127 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8b0591ec/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
new file mode 100644
index 000..7338b40
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license agreements. See the NOTICE
+ * file distributed with this work for additional information regarding 
copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the License); you may not 
use this file except in compliance with the
+ * License. You may obtain a copy of the License at 
http://www.apache.org/licenses/LICENSE-2.0 Unless required by
+ * applicable law or agreed to in writing, software distributed under the 
License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
+import static 
org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM;
+import static org.apache.phoenix.util.TestUtil.LOCALHOST;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
+import org.apache.hadoop.hbase.ipc.CallRunner;
+import org.apache.hadoop.hbase.ipc.PhoenixIndexRpcScheduler;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.RpcExecutor;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.hbase.index.IndexQosRpcControllerFactory;
+import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
+import org.apache.phoenix.jdbc.PhoenixTestDriver;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+

[06/31] phoenix git commit: PHOENIX-1746 Pass through guidepost config params on UPDATE STATISTICS call

2015-05-20 Thread apurtell
PHOENIX-1746 Pass through guidepost config params on UPDATE STATISTICS call


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/096586e6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/096586e6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/096586e6

Branch: refs/heads/4.x-HBase-1.x
Commit: 096586e65e2779433bf30c30e97f78ae2316365e
Parents: e06ceaf
Author: James Taylor jtay...@salesforce.com
Authored: Mon Mar 23 10:28:23 2015 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Mon Mar 23 10:29:29 2015 -0700

--
 .../StatsCollectorWithSplitsAndMultiCFIT.java   |  6 ++
 phoenix-core/src/main/antlr3/PhoenixSQL.g   |  4 ++--
 .../coprocessor/BaseScannerRegionObserver.java  |  6 --
 .../UngroupedAggregateRegionObserver.java   |  4 +++-
 .../apache/phoenix/jdbc/PhoenixStatement.java   |  9 +
 .../apache/phoenix/parse/ParseNodeFactory.java  |  4 ++--
 .../parse/UpdateStatisticsStatement.java| 11 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 19 ++
 .../schema/stats/StatisticsCollector.java   | 21 +++-
 9 files changed, 63 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
index c34d598..bcb3a0a 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
@@ -136,6 +136,12 @@ public class StatsCollectorWithSplitsAndMultiCFIT extends 
StatsCollectorAbstract
 assertRowCountAndByteCount(info, rowCountArr[i], byteCountArr[i]);
 i++;
 }
+
+TestUtil.analyzeTable(conn, STATS_TEST_TABLE_NAME_NEW);
+String query = UPDATE STATISTICS  + STATS_TEST_TABLE_NAME_NEW +  
SET \ + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + \= + 
Long.toString(2000);
+conn.createStatement().execute(query);
+keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME_NEW);
+assertEquals(6, keyRanges.size());
 }
 
 protected void assertRowCountAndByteCount(GuidePostsInfo info, long 
rowCount, long byteCount) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/main/antlr3/PhoenixSQL.g
--
diff --git a/phoenix-core/src/main/antlr3/PhoenixSQL.g 
b/phoenix-core/src/main/antlr3/PhoenixSQL.g
index 6a2e3b9..0330a39 100644
--- a/phoenix-core/src/main/antlr3/PhoenixSQL.g
+++ b/phoenix-core/src/main/antlr3/PhoenixSQL.g
@@ -520,8 +520,8 @@ alter_table_node returns [AlterTableStatement ret]
 ;
 
 update_statistics_node returns [UpdateStatisticsStatement ret]
-   :   UPDATE STATISTICS t=from_table_name (s=INDEX | s=ALL | s=COLUMNS)?
-   {ret = factory.updateStatistics(factory.namedTable(null, t), s 
== null ? StatisticsCollectionScope.getDefault() : 
StatisticsCollectionScope.valueOf(SchemaUtil.normalizeIdentifier(s.getText(;}
+   :   UPDATE STATISTICS t=from_table_name (s=INDEX | s=ALL | s=COLUMNS)? 
(SET (p=properties))?
+   {ret = factory.updateStatistics(factory.namedTable(null, t), s 
== null ? StatisticsCollectionScope.getDefault() : 
StatisticsCollectionScope.valueOf(SchemaUtil.normalizeIdentifier(s.getText())), 
p);}
;
 
 prop_name returns [String ret]

http://git-wip-us.apache.org/repos/asf/phoenix/blob/096586e6/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index c3988a0..a2269b4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -38,6 +38,8 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
 import org.apache.phoenix.execute.TupleProjector;
 import 

[16/31] phoenix git commit: PHOENIX-1457 Use high priority queue for metadata endpoint calls

2015-05-20 Thread apurtell
http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
 
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
deleted file mode 100644
index 7d08c0d..000
--- 
a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixIndexRpcSchedulerFactoryTest.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory;
-import org.apache.phoenix.query.QueryServices;
-import org.junit.Test;
-
-public class PhoenixIndexRpcSchedulerFactoryTest {
-
-@Test
-public void ensureInstantiation() throws Exception {
-Configuration conf = new Configuration(false);
-conf.setClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
-PhoenixIndexRpcSchedulerFactory.class, RpcSchedulerFactory.class);
-// kinda lame that we copy the copy from the regionserver to do this 
and can't use a static
-// method, but meh
-try {
-Class? rpcSchedulerFactoryClass =
-
conf.getClass(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
-SimpleRpcSchedulerFactory.class);
-Object o = rpcSchedulerFactoryClass.newInstance();
-assertTrue(o instanceof PhoenixIndexRpcSchedulerFactory);
-} catch (InstantiationException e) {
-assertTrue(Should not have got an exception when instantiing the 
rpc scheduler:  + e,
-false);
-} catch (IllegalAccessException e) {
-assertTrue(Should not have got an exception when instantiing the 
rpc scheduler:  + e,
-false);
-}
-}
-
-/**
- * Ensure that we can't configure the index priority ranges inside the 
hbase ranges
- * @throws Exception
- */
-@Test
-public void testValidateIndexPriorityRanges() throws Exception {
-Configuration conf = new Configuration(false);
-// standard configs should be fine
-PhoenixIndexRpcSchedulerFactory factory = new 
PhoenixIndexRpcSchedulerFactory();
-factory.create(conf, null);
-
-setMinMax(conf, 0, 4);
-factory.create(conf, null);
-
-setMinMax(conf, 201, 202);
-factory.create(conf, null);
-
-setMinMax(conf, 102, 101);
-try {
-factory.create(conf, null);
-fail(Should not have allowed max less than min);
-} catch (IllegalArgumentException e) {
-// expected
-}
-
-setMinMax(conf, 5, 6);
-try {
-factory.create(conf, null);
-fail(Should not have allowed min in range);
-} catch (IllegalArgumentException e) {
-// expected
-}
-
-setMinMax(conf, 6, 60);
-try {
-factory.create(conf, null);
-fail(Should not have allowed min/max in hbase range);
-} catch (IllegalArgumentException e) {
-// expected
-}
-
-setMinMax(conf, 6, 101);
-try {
-factory.create(conf, null);
-fail(Should not have allowed in range);
-} catch (IllegalArgumentException e) {
-// expected
-}
-}
-
-private void setMinMax(Configuration conf, int min, int max) {
-conf.setInt(QueryServices.MIN_INDEX_PRIOIRTY_ATTRIB, min);
-conf.setInt(QueryServices.MAX_INDEX_PRIOIRTY_ATTRIB, max);
-}
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f0c2ed4e/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/PhoenixRpcSchedulerFactoryTest.java

[02/31] phoenix git commit: PHOENIX-1642 Make Phoenix Master Branch pointing to HBase1.0.0

2015-05-20 Thread apurtell
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java 
b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
index cee3b95..8bd918e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
@@ -19,7 +19,7 @@ package org.apache.phoenix.trace;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.cloudera.htrace.Span;
+import org.apache.htrace.Span;
 
 /**
  * Utilities for tracing

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
index 3799fdb..b4f70b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/NullSpan.java
@@ -21,8 +21,9 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.TimelineAnnotation;
+import org.apache.htrace.Span;
+import org.apache.htrace.TimelineAnnotation;
+import org.apache.phoenix.util.StringUtil;
 
 /**
  * Fake {@link Span} that doesn't save any state, in place of ttnull/tt 
return values, to avoid
@@ -109,4 +110,9 @@ public class NullSpan implements Span {
   public String getProcessId() {
 return null;
   }
+
+  @Override
+  public String toJson() {
+return StringUtil.EMPTY_STRING;
+  }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a29e163f/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java 
b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
index 7cd55e8..c9add01 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.trace.util;
 
 import static org.apache.phoenix.util.StringUtil.toBytes;
 
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Properties;
 import java.util.concurrent.Callable;
@@ -28,20 +29,22 @@ import javax.annotation.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.HTraceConfiguration;
 import org.apache.phoenix.call.CallRunner;
 import org.apache.phoenix.call.CallWrapper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.parse.TraceStatement;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.trace.TraceMetricSource;
-import org.cloudera.htrace.Sampler;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.Trace;
-import org.cloudera.htrace.TraceScope;
-import org.cloudera.htrace.Tracer;
-import org.cloudera.htrace.impl.ProbabilitySampler;
-import org.cloudera.htrace.wrappers.TraceCallable;
-import org.cloudera.htrace.wrappers.TraceRunnable;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceScope;
+import org.apache.htrace.Tracer;
+import org.apache.htrace.impl.ProbabilitySampler;
+import org.apache.htrace.wrappers.TraceCallable;
+import org.apache.htrace.wrappers.TraceRunnable;
 
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
@@ -58,10 +61,10 @@ public class Tracing {
 // Constants for tracing across the wire
 public static final String TRACE_ID_ATTRIBUTE_KEY = 
phoenix.trace.traceid;
 public static final String SPAN_ID_ATTRIBUTE_KEY = phoenix.trace.spanid;
-
+
 // Constants for passing into the metrics system
 private static final String TRACE_METRIC_PREFIX = phoenix.trace.instance;
-
+
 /**
  * Manage the types of frequencies that we support. By default, we never 
turn on tracing.
  */
@@ -110,11 +113,12 @@ public class Tracing {
 private static FunctionConfigurationAdapter, Sampler? 
CREATE_PROBABILITY =
 new FunctionConfigurationAdapter, Sampler?() {
 @Override
-public Sampler? apply(ConfigurationAdapter conn) {
+public Sampler? apply(ConfigurationAdapter conf) {
 // get the connection properties for the probability 
information
-  

[01/31] phoenix git commit: Changed version to 4.4.0-HBase-1.x-SNAPSHOT

2015-05-20 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.3 52d183356 - 33cb45d0e
  refs/heads/4.x-HBase-0.98 1b943dbf2 - ff5d8b930
  refs/heads/4.x-HBase-1.0 deb478652 - e9623da17
  refs/heads/4.x-HBase-1.x [created] 166425dba
  refs/heads/5.x-HBase-1.1 [created] bd974e7b7
  refs/heads/master c83ab9edb - a4b4e0e2d


Changed version to 4.4.0-HBase-1.x-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/03fce013
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/03fce013
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/03fce013

Branch: refs/heads/4.x-HBase-1.x
Commit: 03fce013c3a0c4883d3d1e9ad037d81c471ef74f
Parents: 174d0e6
Author: Enis Soztutar e...@apache.org
Authored: Thu Mar 19 13:34:46 2015 -0700
Committer: Enis Soztutar e...@apache.org
Committed: Thu Mar 19 13:34:46 2015 -0700

--
 phoenix-assembly/pom.xml | 2 +-
 phoenix-core/pom.xml | 2 +-
 phoenix-flume/pom.xml| 2 +-
 phoenix-pherf/pom.xml| 2 +-
 phoenix-pig/pom.xml  | 2 +-
 pom.xml  | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index a887dbf..96bb16f 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -26,7 +26,7 @@
   parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-HBase-0.98-SNAPSHOT/version
+version4.4.0-HBase-1.x-SNAPSHOT/version
   /parent
   artifactIdphoenix-assembly/artifactId
   namePhoenix Assembly/name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 31b7afd..a325b27 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-HBase-0.98-SNAPSHOT/version
+version4.4.0-HBase-1.x-SNAPSHOT/version
   /parent
   artifactIdphoenix-core/artifactId
   namePhoenix Core/name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/phoenix-flume/pom.xml
--
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 6e29227..af01f6b 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-HBase-0.98-SNAPSHOT/version
+version4.4.0-HBase-1.x-SNAPSHOT/version
   /parent
   artifactIdphoenix-flume/artifactId
   namePhoenix - Flume/name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/phoenix-pherf/pom.xml
--
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 26698b2..a0d521c 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -22,7 +22,7 @@
 parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-HBase-0.98-SNAPSHOT/version
+version4.4.0-HBase-1.x-SNAPSHOT/version
 /parent
 
 artifactIdpherf/artifactId

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/phoenix-pig/pom.xml
--
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 7a2072c..0709657 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   parent
 groupIdorg.apache.phoenix/groupId
 artifactIdphoenix/artifactId
-version4.4.0-HBase-0.98-SNAPSHOT/version
+version4.4.0-HBase-1.x-SNAPSHOT/version
   /parent
   artifactIdphoenix-pig/artifactId
   namePhoenix - Pig/name

http://git-wip-us.apache.org/repos/asf/phoenix/blob/03fce013/pom.xml
--
diff --git a/pom.xml b/pom.xml
index f8f268d..92b0ed6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,7 +3,7 @@
   modelVersion4.0.0/modelVersion
   groupIdorg.apache.phoenix/groupId
   artifactIdphoenix/artifactId
-  version4.4.0-HBase-0.98-SNAPSHOT/version
+  version4.4.0-HBase-1.x-SNAPSHOT/version
   packagingpom/packaging
   nameApache Phoenix/name
   descriptionA SQL layer over HBase/description



[24/31] phoenix git commit: PHOENIX-1681 Use the new Region Interface

2015-05-20 Thread apurtell
http://git-wip-us.apache.org/repos/asf/phoenix/blob/260fe5ca/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index f2d4fb5..5a410ea 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
 import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -100,7 +100,7 @@ public class StatisticsWriter implements Closeable {
 statsWriterTable.close();
 }
 
-public void splitStats(HRegion p, HRegion l, HRegion r, 
StatisticsCollector tracker, ImmutableBytesPtr cfKey,
+public void splitStats(Region p, Region l, Region r, StatisticsCollector 
tracker, ImmutableBytesPtr cfKey,
 ListMutation mutations) throws IOException {
 if (tracker == null) { return; }
 boolean useMaxTimeStamp = clientTimeStamp == 
StatisticsCollector.NO_TIMESTAMP;
@@ -108,8 +108,8 @@ public class StatisticsWriter implements Closeable {
 mutations.add(getLastStatsUpdatedTimePut(clientTimeStamp));
 }
 long readTimeStamp = useMaxTimeStamp ? HConstants.LATEST_TIMESTAMP : 
clientTimeStamp;
-Result result = StatisticsUtil.readRegionStatistics(statsReaderTable, 
tableName, cfKey, p.getRegionName(),
-readTimeStamp);
+Result result = StatisticsUtil.readRegionStatistics(statsReaderTable, 
tableName, cfKey,
+p.getRegionInfo().getRegionName(), readTimeStamp);
 if (result != null  !result.isEmpty()) {
Cell cell = 
result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, 
PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
Cell rowCountCell = 
result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, 
PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT_BYTES);
@@ -119,13 +119,13 @@ public class StatisticsWriter implements Closeable {
 
 GuidePostsInfo guidePostsRegionInfo = 
GuidePostsInfo.deserializeGuidePostsInfo(cell.getValueArray(),
 cell.getValueOffset(), cell.getValueLength(), 
rowCount);
-byte[] pPrefix = StatisticsUtil.getRowKey(tableName, cfKey, 
p.getRegionName());
+byte[] pPrefix = StatisticsUtil.getRowKey(tableName, cfKey, 
p.getRegionInfo().getRegionName());
 mutations.add(new Delete(pPrefix, writeTimeStamp));
 
long byteSize = 0;
 Cell byteSizeCell = 
result.getColumnLatestCell(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
 PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES);
-int index = 
Collections.binarySearch(guidePostsRegionInfo.getGuidePosts(), r.getStartKey(),
+int index = 
Collections.binarySearch(guidePostsRegionInfo.getGuidePosts(), 
r.getRegionInfo().getStartKey(),
 Bytes.BYTES_COMPARATOR);
 int size = guidePostsRegionInfo.getGuidePosts().size();
 int midEndIndex, midStartIndex;
@@ -159,7 +159,7 @@ public class StatisticsWriter implements Closeable {
 .getGuidePosts().subList(0, midEndIndex), 
leftRowCount);
 tracker.clear();
tracker.addGuidePost(cfKey, lguidePosts, leftByteCount, 
cell.getTimestamp());
-   addStats(l.getRegionName(), tracker, cfKey, mutations);
+   addStats(l.getRegionInfo().getRegionName(), tracker, 
cfKey, mutations);
}
if (midStartIndex  size) {
GuidePostsInfo rguidePosts = new 
GuidePostsInfo(rightByteCount, guidePostsRegionInfo
@@ -167,7 +167,7 @@ public class StatisticsWriter implements Closeable {
 rightRowCount);
tracker.clear();
tracker.addGuidePost(cfKey, rguidePosts, 
rightByteCount, cell.getTimestamp());
-   addStats(r.getRegionName(), tracker, cfKey, mutations);
+   addStats(r.getRegionInfo().getRegionName(), tracker, 
cfKey, mutations);
 

[20/31] phoenix git commit: PHOENIX-1722 Speedup CONVERT_TZ function add JodaTimezoneCache (Vaclav Loffelmann)

2015-05-20 Thread apurtell
PHOENIX-1722 Speedup CONVERT_TZ function add JodaTimezoneCache (Vaclav 
Loffelmann)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fcedbe6a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fcedbe6a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fcedbe6a

Branch: refs/heads/4.x-HBase-1.x
Commit: fcedbe6a492faa7cc39fe0ed7c4c24c7d41db1a5
Parents: 4248be3
Author: Thomas tdsi...@salesforce.com
Authored: Fri Mar 27 15:32:38 2015 -0700
Committer: Thomas tdsi...@salesforce.com
Committed: Fri Mar 27 15:32:38 2015 -0700

--
 .../apache/phoenix/cache/JodaTimezoneCache.java | 84 
 .../phoenix/cache/JodaTimezoneCacheTest.java| 51 
 2 files changed, 135 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fcedbe6a/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java 
b/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
new file mode 100644
index 000..54904d7
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/JodaTimezoneCache.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2015 Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.cache;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.util.concurrent.UncheckedExecutionException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.schema.IllegalDataException;
+import org.joda.time.DateTimeZone;
+
+public class JodaTimezoneCache {
+
+public static final int CACHE_EXPRIRE_TIME_MINUTES = 10;
+private static final LoadingCacheByteBuffer, DateTimeZone 
cachedJodaTimeZones = createTimezoneCache();
+
+/**
+ * Returns joda's DateTimeZone instance from cache or create new instance 
and cache it.
+ *
+ * @param timezoneId Timezone Id as accepted by {@code 
DateTimeZone.forID()}. E.g. Europe/Isle_of_Man
+ * @return joda's DateTimeZone instance
+ * @throws IllegalDataException if unknown timezone id is passed
+ */
+public static DateTimeZone getInstance(ByteBuffer timezoneId) {
+try {
+return cachedJodaTimeZones.get(timezoneId);
+} catch (ExecutionException ex) {
+throw new IllegalDataException(ex);
+} catch (UncheckedExecutionException e) {
+throw new IllegalDataException(Unknown timezone  + 
Bytes.toString(timezoneId.array()));
+}
+}
+
+/**
+ * Returns joda's DateTimeZone instance from cache or create new instance 
and cache it.
+ *
+ * @param timezoneId Timezone Id as accepted by {@code 
DateTimeZone.forID()}. E.g. Europe/Isle_of_Man
+ * @return joda's DateTimeZone instance
+ * @throws IllegalDataException if unknown timezone id is passed
+ */
+public static DateTimeZone getInstance(ImmutableBytesWritable timezoneId) {
+return getInstance(ByteBuffer.wrap(timezoneId.copyBytes()));
+}
+
+/**
+ * Returns joda's DateTimeZone instance from cache or create new instance 
and cache it.
+ *
+ * @param timezoneId Timezone Id as accepted by {@code 
DateTimeZone.forID()}. E.g. Europe/Isle_of_Man
+ * @return joda's DateTimeZone instance
+ * @throws IllegalDataException if unknown timezone id is passed
+ */
+public static DateTimeZone getInstance(String timezoneId) {
+return getInstance(ByteBuffer.wrap(Bytes.toBytes(timezoneId)));
+}
+
+private static LoadingCacheByteBuffer, DateTimeZone 
createTimezoneCache() {
+return 
CacheBuilder.newBuilder().expireAfterAccess(CACHE_EXPRIRE_TIME_MINUTES, 
TimeUnit.MINUTES).build(new CacheLoaderByteBuffer, DateTimeZone() {
+
+@Override
+public DateTimeZone load(ByteBuffer timezone) throws Exception {
+return 

[04/31] phoenix git commit: PHOENIX-1703 Fail connection when server minor version is less than client minor version

2015-05-20 Thread apurtell
PHOENIX-1703 Fail connection when server minor version is less than client 
minor version


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4bc162d8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4bc162d8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4bc162d8

Branch: refs/heads/4.x-HBase-1.x
Commit: 4bc162d8f254d01bfff71d429a124e5c2d146054
Parents: a29e163
Author: James Taylor jtay...@salesforce.com
Authored: Thu Mar 19 22:23:20 2015 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Sat Mar 21 11:10:04 2015 -0700

--
 .../org/apache/phoenix/util/MetaDataUtil.java   | 22 +---
 .../apache/phoenix/util/MetaDataUtilTest.java   | 18 +---
 2 files changed, 24 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4bc162d8/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index c1aa2cc..f916f5b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -49,16 +49,16 @@ import org.apache.phoenix.hbase.index.util.VersionUtil;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.types.PBoolean;
-import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.PName;
-import org.apache.phoenix.schema.types.PSmallint;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SequenceKey;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PSmallint;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -79,21 +79,22 @@ public class MetaDataUtil {
 public static final String PARENT_TABLE_KEY = PARENT_TABLE;
 public static final byte[] PARENT_TABLE_KEY_BYTES = 
Bytes.toBytes(PARENT_TABLE);
 
-public static boolean areClientAndServerCompatible(long version) {
+public static boolean areClientAndServerCompatible(long 
serverHBaseAndPhoenixVersion) {
 // As of 3.0, we allow a client and server to differ for the minor 
version.
 // Care has to be taken to upgrade the server before the client, as 
otherwise
 // the client may call expressions that don't yet exist on the server.
 // Differing by the patch version has always been allowed.
 // Only differing by the major version is not allowed.
-return 
areClientAndServerCompatible(MetaDataUtil.decodePhoenixVersion(version), 
MetaDataProtocol.PHOENIX_MAJOR_VERSION);
+return 
areClientAndServerCompatible(MetaDataUtil.decodePhoenixVersion(serverHBaseAndPhoenixVersion),
 MetaDataProtocol.PHOENIX_MAJOR_VERSION, 
MetaDataProtocol.PHOENIX_MINOR_VERSION);
 }
 
 // Default scope for testing
-static boolean areClientAndServerCompatible(int version, int pMajor) {
+static boolean areClientAndServerCompatible(int serverVersion, int 
clientMajorVersion, int clientMinorVersion) {
 // A server and client with the same major and minor version number 
must be compatible.
 // So it's important that we roll the PHOENIX_MAJOR_VERSION or 
PHOENIX_MINOR_VERSION
 // when we make an incompatible change.
-return VersionUtil.encodeMaxMinorVersion(pMajor) = version  
VersionUtil.encodeMinMinorVersion(pMajor) = version;
+return VersionUtil.encodeMinPatchVersion(clientMajorVersion, 
clientMinorVersion) = serverVersion  // Minor major and minor cannot be 
ahead of server
+VersionUtil.encodeMaxMinorVersion(clientMajorVersion) = 
serverVersion; // Major version must at least be up to server version
 }
 
 // Given the encoded integer representing the phoenix version in the 
encoded version value.
@@ -129,6 +130,11 @@ public class MetaDataUtil {
 return major + . + minor + . + patch;
 }
 
+public static int encodePhoenixVersion() {
+return 
VersionUtil.encodeVersion(MetaDataProtocol.PHOENIX_MAJOR_VERSION, 
MetaDataProtocol.PHOENIX_MINOR_VERSION,
+MetaDataProtocol.PHOENIX_PATCH_NUMBER);
+}
+
 public static long encodeHBaseAndPhoenixVersions(String hbaseVersion) 

[09/31] phoenix git commit: PHOENIX-1653 Support separate clusters for MR jobs

2015-05-20 Thread apurtell
PHOENIX-1653 Support separate clusters for MR jobs

Add support for the input and output formats of a Phoenix MapReduce job to
point to separate clusters using override configuration settings. Defaults to
existing behavior (HConstants.ZOOKEEPER_QUORUM)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7de8ee1e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7de8ee1e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7de8ee1e

Branch: refs/heads/4.x-HBase-1.x
Commit: 7de8ee1e914f5e0008ca9d983869757e4ca92b78
Parents: f4180fa
Author: gjacoby gjac...@salesforce.com
Authored: Fri Feb 27 16:49:14 2015 -0800
Committer: Gabriel Reid gabri...@ngdata.com
Committed: Tue Mar 24 20:07:52 2015 +0100

--
 .../phoenix/mapreduce/PhoenixInputFormat.java   | 15 ++--
 .../phoenix/mapreduce/PhoenixRecordWriter.java  |  2 +-
 .../phoenix/mapreduce/index/IndexTool.java  |  2 +-
 .../index/PhoenixIndexImportMapper.java |  2 +-
 .../phoenix/mapreduce/util/ConnectionUtil.java  | 88 ++--
 .../util/PhoenixConfigurationUtil.java  | 72 ++--
 .../mapreduce/util/PhoenixMapReduceUtil.java| 22 -
 .../util/PhoenixConfigurationUtilTest.java  | 60 -
 .../pig/util/QuerySchemaParserFunction.java |  2 +-
 .../pig/util/SqlQueryToColumnInfoFunction.java  |  2 +-
 10 files changed, 219 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index a83b9ae..31759b4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -98,15 +98,16 @@ public class PhoenixInputFormatT extends DBWritable 
extends InputFormatNullWr
  * @throws IOException
  * @throws SQLException
  */
-private QueryPlan getQueryPlan(final JobContext context,final 
Configuration configuration) throws IOException {
+private QueryPlan getQueryPlan(final JobContext context, final 
Configuration configuration)
+throws IOException {
 Preconditions.checkNotNull(context);
-try{
+try {
 final String currentScnValue = 
configuration.get(PhoenixConfigurationUtil.CURRENT_SCN_VALUE);
 final Properties overridingProps = new Properties();
 if(currentScnValue != null) {
 overridingProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
currentScnValue);
 }
-final Connection connection = 
ConnectionUtil.getConnection(configuration,overridingProps);
+final Connection connection = 
ConnectionUtil.getInputConnection(configuration, overridingProps);
 final String selectStatement = 
PhoenixConfigurationUtil.getSelectStatement(configuration);
 Preconditions.checkNotNull(selectStatement);
 final Statement statement = connection.createStatement();
@@ -116,9 +117,11 @@ public class PhoenixInputFormatT extends DBWritable 
extends InputFormatNullWr
 // Initialize the query plan so it sets up the parallel scans
 queryPlan.iterator();
 return queryPlan;
-} catch(Exception exception) {
-LOG.error(String.format(Failed to get the query plan with error 
[%s],exception.getMessage()));
+} catch (Exception exception) {
+LOG.error(String.format(Failed to get the query plan with error 
[%s],
+exception.getMessage()));
 throw new RuntimeException(exception);
 }
-   }
+}
+
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7de8ee1e/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
index 4d26bf4..5843076 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordWriter.java
@@ -46,7 +46,7 @@ public class PhoenixRecordWriterT extends DBWritable  
extends RecordWriterNul
 private long numRecords = 0;
 
 public PhoenixRecordWriter(final Configuration configuration) throws 
SQLException {
-this.conn = ConnectionUtil.getConnection(configuration);
+ 

[05/31] phoenix git commit: PHOENIX-1753 Query with RVC that doesn't lead with the row key can return incorrect results

2015-05-20 Thread apurtell
PHOENIX-1753 Query with RVC that doesn't lead with the row key can return 
incorrect results


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e06ceaf4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e06ceaf4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e06ceaf4

Branch: refs/heads/4.x-HBase-1.x
Commit: e06ceaf455642d92b500d0e4edc343a3342a0d93
Parents: 4bc162d
Author: James Taylor jtay...@salesforce.com
Authored: Thu Mar 19 18:54:57 2015 -0700
Committer: James Taylor jtay...@salesforce.com
Committed: Sat Mar 21 11:13:24 2015 -0700

--
 .../phoenix/end2end/RowValueConstructorIT.java  | 33 +++-
 .../apache/phoenix/compile/WhereOptimizer.java  |  4 +++
 .../phoenix/compile/WhereOptimizerTest.java | 16 ++
 3 files changed, 52 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e06ceaf4/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
index 8d67fa4..3859785 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
@@ -1362,6 +1362,37 @@ public class RowValueConstructorIT extends 
BaseClientManagedTimeIT {
 conn.close();
 }
 
-
+@Test
+public void testRVCWithRowKeyNotLeading() throws Exception {
+String ddl = CREATE TABLE sorttest4 (rownum BIGINT primary key, name 
varchar(16), age integer);
+Connection conn = nextConnection(getUrl());
+conn.createStatement().execute(ddl);
+conn.close();
+conn = nextConnection(getUrl());
+String dml = UPSERT INTO sorttest4 (rownum, name, age) values (?, ?, 
?);
+PreparedStatement stmt = conn.prepareStatement(dml);
+stmt.setInt(1, 1);
+stmt.setString(2, A);
+stmt.setInt(3, 1);
+stmt.executeUpdate();
+stmt.setInt(1, 2);
+stmt.setString(2, B);
+stmt.setInt(3, 2);
+stmt.executeUpdate();
+conn.commit();
+conn.close();
+// the below query should only return one record - (1, A, 1)
+String query = SELECT rownum, name, age FROM sorttest4 where (age, 
rownum)  (2, 2);
+conn = nextConnection(getUrl());
+ResultSet rs = conn.createStatement().executeQuery(query);
+int numRecords = 0;
+while (rs.next()) {
+assertEquals(1, rs.getInt(1));
+assertEquals(A, rs.getString(2));
+assertEquals(1, rs.getInt(3));
+numRecords++;
+}
+assertEquals(1, numRecords);
+}
 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e06ceaf4/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index 713076e..b03793d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -542,6 +542,10 @@ public class WhereOptimizer {
 int span = position - initialPosition;
 return new SingleKeySlot(new 
RowValueConstructorKeyPart(table.getPKColumns().get(initialPosition), rvc, 
span, childSlots), initialPosition, span, EVERYTHING_RANGES);
 }
+// If we don't clear the child list, we end up passing some of
+// the child expressions of previous matches up the tree, causing
+// those expressions to form the scan start/stop key. PHOENIX-1753
+childSlots.clear();
 return null;
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e06ceaf4/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index 0ec6b45..94b25d0 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -1275,6 +1275,22 @@ public class WhereOptimizerTest extends 
BaseConnectionlessQueryTest {
 }
 
 @Test
+public void 

[26/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

2015-05-20 Thread apurtell
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a4b4e0e2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a4b4e0e2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a4b4e0e2

Branch: refs/heads/master
Commit: a4b4e0e2d862d5d4ee0f3a6f9587f53fe87d629f
Parents: c83ab9e
Author: Andrew Purtell apurt...@apache.org
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell apurt...@apache.org
Committed: Wed May 20 09:53:53 2015 -0700

--
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --
 .../covered/filter/TestFamilyOnlyFilter.java| 106 ---
 2 files changed, 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4b4e0e2/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 68555ef..000
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is 
reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-return done;
-  }
-
-  @Override
-  public void reset() {
-done = false;
-previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-if (done) {
-  return ReturnCode.SKIP;
-}
-ReturnCode code = super.filterKeyValue(v);
-if (previousMatchFound) {
-  // we found a match before, and now we are skipping the key because of 
the family, therefore
-  // we are done (no more of the family).
-  if (code.equals(ReturnCode.SKIP)) {
-  done = true;
-  }
-} else {
-  // if we haven't seen a match before, then it doesn't matter what we see 
now, except to mark
-  // if we've seen a match
-  if (code.equals(ReturnCode.INCLUDE)) {
-previousMatchFound = true;
-  }
-}
-return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4b4e0e2/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 216f548..000
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the

[30/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

2015-05-20 Thread apurtell
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/166425db
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/166425db
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/166425db

Branch: refs/heads/4.x-HBase-1.x
Commit: 166425dba7ed851c45702c7cc4d7fb0e0c32b923
Parents: 0d74cff
Author: Andrew Purtell apurt...@apache.org
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell apurt...@apache.org
Committed: Wed May 20 09:54:33 2015 -0700

--
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --
 .../covered/filter/TestFamilyOnlyFilter.java| 106 ---
 2 files changed, 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/166425db/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 68555ef..000
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is 
reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-return done;
-  }
-
-  @Override
-  public void reset() {
-done = false;
-previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-if (done) {
-  return ReturnCode.SKIP;
-}
-ReturnCode code = super.filterKeyValue(v);
-if (previousMatchFound) {
-  // we found a match before, and now we are skipping the key because of 
the family, therefore
-  // we are done (no more of the family).
-  if (code.equals(ReturnCode.SKIP)) {
-  done = true;
-  }
-} else {
-  // if we haven't seen a match before, then it doesn't matter what we see 
now, except to mark
-  // if we've seen a match
-  if (code.equals(ReturnCode.INCLUDE)) {
-previousMatchFound = true;
-  }
-}
-return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/166425db/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 216f548..000
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 

[31/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

2015-05-20 Thread apurtell
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bd974e7b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bd974e7b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bd974e7b

Branch: refs/heads/5.x-HBase-1.1
Commit: bd974e7b71e9fed74697b8ea86d887dddfb6daee
Parents: 260fe5c
Author: Andrew Purtell apurt...@apache.org
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell apurt...@apache.org
Committed: Wed May 20 09:55:23 2015 -0700

--
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --
 .../covered/filter/TestFamilyOnlyFilter.java| 106 ---
 2 files changed, 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bd974e7b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index d39b01d..000
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is 
reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-return done;
-  }
-
-  @Override
-  public void reset() {
-done = false;
-previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-if (done) {
-  return ReturnCode.NEXT_ROW;
-}
-ReturnCode code = super.filterKeyValue(v);
-if (previousMatchFound) {
-  // we found a match before, and now we are skipping the key because of 
the family, therefore
-  // we are done (no more of the family).
-  if (code.equals(ReturnCode.SKIP) || code.equals(ReturnCode.NEXT_ROW)) {
-done = true;
-  }
-} else {
-  // if we haven't seen a match before, then it doesn't matter what we see 
now, except to mark
-  // if we've seen a match
-  if (code.equals(ReturnCode.INCLUDE)) {
-previousMatchFound = true;
-  }
-}
-return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bd974e7b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 808e6bc..000
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * 

[10/31] phoenix git commit: PHOENIX-1744 Allow Integer, UnsignedInt and UnsignedLong to be Cast to TIMESTAMP (Dave Hacker)

2015-05-20 Thread apurtell
PHOENIX-1744 Allow Integer, UnsignedInt and UnsignedLong to be Cast to 
TIMESTAMP (Dave Hacker)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/250474de
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/250474de
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/250474de

Branch: refs/heads/4.x-HBase-1.x
Commit: 250474deb381b376d5ed442186470f65b36a8117
Parents: 7de8ee1
Author: David dhac...@salesforce.com
Authored: Wed Mar 18 13:37:20 2015 -0700
Committer: Thomas tdsi...@salesforce.com
Committed: Tue Mar 24 14:00:20 2015 -0700

--
 .../phoenix/end2end/ToDateFunctionIT.java   | 57 
 .../phoenix/schema/types/PUnsignedLong.java |  5 ++
 2 files changed, 62 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/250474de/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
index bda4ea5..8de39b7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToDateFunctionIT.java
@@ -33,6 +33,7 @@ import java.sql.Timestamp;
 import java.util.Properties;
 
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.TypeMismatchException;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -176,4 +177,60 @@ public class ToDateFunctionIT extends 
BaseHBaseManagedTimeIT {
 callToDateFunction(
 customTimeZoneConn, TO_DATE('1970-01-01', 
'-MM-dd')).getTime());
 }
+
+@Test
+public void testTimestampCast() throws SQLException {
+Properties props = new Properties();
+props.setProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, GMT+1);
+Connection customTimeZoneConn = DriverManager.getConnection(getUrl(), 
props);
+
+assertEquals(
+1426188807198L,
+callToDateFunction(
+customTimeZoneConn, CAST(1426188807198 AS 
TIMESTAMP)).getTime());
+
+
+try {
+callToDateFunction(
+customTimeZoneConn, CAST(22005 AS TIMESTAMP));
+fail();
+} catch (TypeMismatchException e) {
+
+}
+}
+
+@Test
+public void testUnsignedLongToTimestampCast() throws SQLException {
+Properties props = new Properties();
+props.setProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, GMT+1);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(false);
+try {
+conn.prepareStatement(
+create table TT(
++ a unsigned_int not null, 
++ b unsigned_int not null, 
++ ts unsigned_long not null 
++ constraint PK primary key (a, b, ts))).execute();
+conn.commit();
+
+conn.prepareStatement(upsert into TT values (0, 22120, 
1426188807198)).execute();
+conn.commit();
+
+ResultSet rs = conn.prepareStatement(select a, b, ts, CAST(ts AS 
TIMESTAMP) from TT).executeQuery();
+assertTrue(rs.next());
+assertEquals(new Date(1426188807198L), rs.getObject(4));
+rs.close();
+
+try {
+rs = conn.prepareStatement(select a, b, ts, CAST(b AS 
TIMESTAMP) from TT).executeQuery();
+fail();
+} catch (TypeMismatchException e) {
+
+}
+
+} finally {
+conn.close();
+}
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/250474de/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java
index 67ae05a..a21ccc3 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedLong.java
@@ -95,6 +95,11 @@ public class PUnsignedLong extends PWholeNumberLong {
   }
 
   @Override
+public boolean isCastableTo(PDataType targetType) {
+  return super.isCastableTo(targetType) || 
targetType.isCoercibleTo(PTimestamp.INSTANCE);
+}
+
+  @Override
   public boolean isCoercibleTo(PDataType targetType) {
 return targetType == this || targetType == PUnsignedDouble.INSTANCE || 

[27/31] phoenix git commit: PHOENIX-1979 Remove unused FamilyOnlyFilter

2015-05-20 Thread apurtell
PHOENIX-1979 Remove unused FamilyOnlyFilter


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/33cb45d0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/33cb45d0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/33cb45d0

Branch: refs/heads/4.3
Commit: 33cb45d0ea53f0155824a84fc3ca6243ace9ecef
Parents: 52d1833
Author: Andrew Purtell apurt...@apache.org
Authored: Wed May 20 09:53:53 2015 -0700
Committer: Andrew Purtell apurt...@apache.org
Committed: Wed May 20 09:54:13 2015 -0700

--
 .../index/covered/filter/FamilyOnlyFilter.java  |  80 --
 .../covered/filter/TestFamilyOnlyFilter.java| 106 ---
 2 files changed, 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/33cb45d0/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
deleted file mode 100644
index 68555ef..000
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/FamilyOnlyFilter.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.hbase.index.covered.filter;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-
-/**
- * Similar to the {@link FamilyFilter} but stops when the end of the family is 
reached and only
- * supports equality
- */
-public class FamilyOnlyFilter extends FamilyFilter {
-
-  boolean done = false;
-  private boolean previousMatchFound;
-
-  /**
-   * Filter on exact binary matches to the passed family
-   * @param family to compare against
-   */
-  public FamilyOnlyFilter(final byte[] family) {
-this(new BinaryComparator(family));
-  }
-
-  public FamilyOnlyFilter(final ByteArrayComparable familyComparator) {
-super(CompareOp.EQUAL, familyComparator);
-  }
-
-
-  @Override
-  public boolean filterAllRemaining() {
-return done;
-  }
-
-  @Override
-  public void reset() {
-done = false;
-previousMatchFound = false;
-  }
-
-  @Override
-  public ReturnCode filterKeyValue(Cell v) {
-if (done) {
-  return ReturnCode.SKIP;
-}
-ReturnCode code = super.filterKeyValue(v);
-if (previousMatchFound) {
-  // we found a match before, and now we are skipping the key because of 
the family, therefore
-  // we are done (no more of the family).
-  if (code.equals(ReturnCode.SKIP)) {
-  done = true;
-  }
-} else {
-  // if we haven't seen a match before, then it doesn't matter what we see 
now, except to mark
-  // if we've seen a match
-  if (code.equals(ReturnCode.INCLUDE)) {
-previousMatchFound = true;
-  }
-}
-return code;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/33cb45d0/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
deleted file mode 100644
index 216f548..000
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/filter/TestFamilyOnlyFilter.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- 

[08/31] phoenix git commit: PHOENIX-1676 Set priority of Index Updates correctly, fix IndexQosIT

2015-05-20 Thread apurtell
PHOENIX-1676 Set priority of Index Updates correctly, fix IndexQosIT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f4180fa4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f4180fa4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f4180fa4

Branch: refs/heads/4.x-HBase-1.x
Commit: f4180fa40e26c685bfbf1b59cf4385f9b0e713e9
Parents: 8b0591e
Author: Thomas tdsi...@salesforce.com
Authored: Mon Mar 23 22:51:53 2015 -0700
Committer: Thomas tdsi...@salesforce.com
Committed: Mon Mar 23 22:54:56 2015 -0700

--
 .../java/org/apache/phoenix/end2end/index/IndexQosIT.java | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4180fa4/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
index 7338b40..9558bcb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexQosIT.java
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -36,13 +37,14 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.ipc.BalancedQueueRpcExecutor;
 import org.apache.hadoop.hbase.ipc.CallRunner;
 import org.apache.hadoop.hbase.ipc.PhoenixIndexRpcScheduler;
+import org.apache.hadoop.hbase.ipc.PriorityFunction;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.ipc.RpcExecutor;
 import org.apache.hadoop.hbase.ipc.RpcScheduler;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.apache.phoenix.hbase.index.IndexQosRpcControllerFactory;
@@ -82,8 +84,8 @@ public class IndexQosIT extends BaseTest {
  */
 public static class TestPhoenixIndexRpcSchedulerFactory extends 
PhoenixIndexRpcSchedulerFactory {
 @Override
-public RpcScheduler create(Configuration conf, RegionServerServices 
services) {
-PhoenixIndexRpcScheduler phoenixIndexRpcScheduler = 
(PhoenixIndexRpcScheduler)super.create(conf, services);
+public RpcScheduler create(Configuration conf, PriorityFunction 
priorityFunction, Abortable abortable) {
+PhoenixIndexRpcScheduler phoenixIndexRpcScheduler = 
(PhoenixIndexRpcScheduler)super.create(conf, priorityFunction, abortable);
 phoenixIndexRpcScheduler.setExecutorForTesting(spyRpcExecutor);
 return phoenixIndexRpcScheduler;
 }
@@ -93,7 +95,7 @@ public class IndexQosIT extends BaseTest {
 public void doSetup() throws Exception {
 conf = HBaseConfiguration.create();
 setUpConfigForMiniCluster(conf);
-conf.set(HRegionServer.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
+conf.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
 TestPhoenixIndexRpcSchedulerFactory.class.getName());
 conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY, 
IndexQosRpcControllerFactory.class.getName());
 util = new HBaseTestingUtility(conf);



[23/31] phoenix git commit: PHOENIX-1763 Support building with HBase-1.1.0 (Enis Soztutar)

2015-05-20 Thread apurtell
PHOENIX-1763 Support building with HBase-1.1.0 (Enis Soztutar)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/41ad9188
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/41ad9188
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/41ad9188

Branch: refs/heads/5.x-HBase-1.1
Commit: 41ad9188a22a9e258b2d748f60d73c61726528fd
Parents: d147423
Author: Andrew Purtell apurt...@apache.org
Authored: Wed Apr 15 11:26:39 2015 -0700
Committer: Andrew Purtell apurt...@apache.org
Committed: Wed Apr 15 11:26:39 2015 -0700

--
 phoenix-core/pom.xml| 17 +++--
 .../regionserver/IndexHalfStoreFileReader.java  | 31 ++--
 .../regionserver/IndexSplitTransaction.java | 39 --
 .../hbase/regionserver/LocalIndexMerger.java|  3 +-
 .../cache/aggcache/SpillableGroupByCache.java   | 13 +++-
 .../phoenix/coprocessor/BaseRegionScanner.java  | 12 +--
 .../coprocessor/BaseScannerRegionObserver.java  | 77 +++-
 .../coprocessor/DelegateRegionScanner.java  | 23 --
 .../GroupedAggregateRegionObserver.java | 53 --
 .../coprocessor/HashJoinRegionScanner.java  | 60 ---
 .../coprocessor/MetaDataRegionObserver.java | 23 +++---
 .../phoenix/coprocessor/ScanRegionObserver.java | 11 ++-
 .../UngroupedAggregateRegionObserver.java   | 55 +++---
 .../hbase/index/covered/data/LocalTable.java|  2 +-
 .../index/covered/filter/FamilyOnlyFilter.java  |  6 +-
 .../index/scanner/FilteredKeyValueScanner.java  |  2 +-
 .../phoenix/index/PhoenixIndexBuilder.java  |  6 +-
 .../iterate/RegionScannerResultIterator.java|  9 ++-
 .../phoenix/schema/stats/StatisticsScanner.java | 10 ++-
 .../hbase/ipc/PhoenixIndexRpcSchedulerTest.java |  6 +-
 .../index/covered/TestLocalTableState.java  |  1 -
 .../covered/filter/TestFamilyOnlyFilter.java| 12 +--
 .../index/write/TestWALRecoveryCaching.java |  4 +-
 phoenix-flume/pom.xml   |  9 ---
 phoenix-pig/pom.xml | 31 +---
 pom.xml | 48 +++-
 26 files changed, 361 insertions(+), 202 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 45b8d73..22e6b60 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -350,16 +350,25 @@
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-it/artifactId
-  version${hbase.version}/version
   typetest-jar/type
   scopetest/scope
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
+  artifactIdhbase-annotations/artifactId
+/dependency
+dependency
+  groupIdorg.apache.hbase/groupId
   artifactIdhbase-common/artifactId
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
+  artifactIdhbase-common/artifactId
+  scopetest/scope
+  typetest-jar/type
+/dependency
+dependency
+  groupIdorg.apache.hbase/groupId
   artifactIdhbase-protocol/artifactId
 /dependency
 dependency
@@ -369,18 +378,16 @@
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-server/artifactId
-  version${hbase.version}/version
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-server/artifactId
-  version${hbase.version}/version
   typetest-jar/type
+  scopetest/scope
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-hadoop-compat/artifactId
-  scopetest/scope
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
@@ -391,13 +398,11 @@
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-hadoop2-compat/artifactId
-  version${hbase.version}/version
   scopetest/scope
 /dependency
 dependency
   groupIdorg.apache.hbase/groupId
   artifactIdhbase-hadoop2-compat/artifactId
-  version${hbase.version}/version
   typetest-jar/type
   scopetest/scope
 /dependency

http://git-wip-us.apache.org/repos/asf/phoenix/blob/41ad9188/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 49e2022..9befc8c 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ 

[19/31] phoenix git commit: PHOENIX-1722 Speedup CONVERT_TZ function (Vaclav Loffelmann)

2015-05-20 Thread apurtell
PHOENIX-1722 Speedup CONVERT_TZ function (Vaclav Loffelmann)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4248be3d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4248be3d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4248be3d

Branch: refs/heads/4.x-HBase-1.x
Commit: 4248be3d8a5a3efeb0e103eadac4594fe5de9519
Parents: 709d867
Author: Thomas tdsi...@salesforce.com
Authored: Fri Mar 27 15:17:21 2015 -0700
Committer: Thomas tdsi...@salesforce.com
Committed: Fri Mar 27 15:17:21 2015 -0700

--
 .../end2end/ConvertTimezoneFunctionIT.java  | 24 -
 .../function/ConvertTimezoneFunction.java   | 38 +---
 .../function/TimezoneOffsetFunction.java| 25 +++--
 pom.xml |  2 +-
 4 files changed, 38 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4248be3d/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
index d89a03b..f415dc6 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConvertTimezoneFunctionIT.java
@@ -23,8 +23,10 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 
 import org.apache.phoenix.exception.SQLExceptionCode;
+import static org.junit.Assert.assertFalse;
 import org.junit.Test;
 
 /**
@@ -129,7 +131,7 @@ public class ConvertTimezoneFunctionIT extends 
BaseHBaseManagedTimeIT {
 try {
 ResultSet rs = conn.createStatement().executeQuery(
 SELECT k1, dates, CONVERT_TZ(dates, 'UNKNOWN_TIMEZONE', 
'America/Adak') FROM TIMEZONE_OFFSET_TEST);
-
+
 rs.next();
 rs.getDate(3).getTime();
 fail();
@@ -137,4 +139,24 @@ public class ConvertTimezoneFunctionIT extends 
BaseHBaseManagedTimeIT {
 assertEquals(SQLExceptionCode.ILLEGAL_DATA.getErrorCode(), 
e.getErrorCode());
 }
 }
+
+   @Test
+   public void testConvertMultipleRecords() throws Exception {
+   Connection conn = DriverManager.getConnection(getUrl());
+   String ddl = CREATE TABLE IF NOT EXISTS TIMEZONE_OFFSET_TEST 
(k1 INTEGER NOT NULL, dates DATE CONSTRAINT pk PRIMARY KEY (k1));
+   Statement stmt = conn.createStatement();
+   stmt.execute(ddl);
+   stmt.execute(UPSERT INTO TIMEZONE_OFFSET_TEST (k1, dates) 
VALUES (1, TO_DATE('2014-03-01 00:00:00')));
+   stmt.execute(UPSERT INTO TIMEZONE_OFFSET_TEST (k1, dates) 
VALUES (2, TO_DATE('2014-03-01 00:00:00')));
+   conn.commit();
+
+   ResultSet rs = stmt.executeQuery(
+   SELECT k1, dates, CONVERT_TZ(dates, 'UTC', 
'America/Adak') FROM TIMEZONE_OFFSET_TEST);
+
+   assertTrue(rs.next());
+   assertEquals(139359600L, rs.getDate(3).getTime()); //Fri, 
28 Feb 2014 14:00:00
+   assertTrue(rs.next());
+   assertEquals(139359600L, rs.getDate(3).getTime()); //Fri, 
28 Feb 2014 14:00:00
+   assertFalse(rs.next());
+   }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4248be3d/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
index dcde31f..3ea47a6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ConvertTimezoneFunction.java
@@ -15,21 +15,17 @@
  */
 package org.apache.phoenix.expression.function;
 
-import java.sql.Date;
 import java.sql.SQLException;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.TimeZone;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.cache.JodaTimezoneCache;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.parse.FunctionParseNode;
-import org.apache.phoenix.schema.IllegalDataException;
 import org.apache.phoenix.schema.types.PDataType;
 import 

[15/31] phoenix git commit: The literal -1.0 (floating point) should not be converted to -1 (Integer) (Dave Hacker)

2015-05-20 Thread apurtell
The literal -1.0 (floating point) should not be converted to -1 (Integer) (Dave 
Hacker)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/24ee2c66
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/24ee2c66
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/24ee2c66

Branch: refs/heads/4.x-HBase-1.x
Commit: 24ee2c66711664eb296d89522ecf8f6a950eb249
Parents: 8ea426c
Author: Thomas tdsi...@salesforce.com
Authored: Thu Mar 26 13:11:35 2015 -0700
Committer: Thomas tdsi...@salesforce.com
Committed: Thu Mar 26 13:11:35 2015 -0700

--
 .../phoenix/end2end/ArithmeticQueryIT.java  | 28 
 .../apache/phoenix/parse/ParseNodeFactory.java  |  4 ++-
 2 files changed, 31 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/24ee2c66/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
index 2df1827..72eb016 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
@@ -957,4 +957,32 @@ public class ArithmeticQueryIT extends 
BaseHBaseManagedTimeIT {
 assertTrue(rs.next());
 assertEquals(1.3, rs.getDouble(1), 0.001);
 }
+
+@Test
+public void testFloatingPointUpsert() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+String ddl = CREATE TABLE test (id VARCHAR not null primary key, name 
VARCHAR, lat FLOAT);
+conn.createStatement().execute(ddl);
+String dml = UPSERT INTO test(id,name,lat) VALUES ('testid', 
'testname', -1.00);
+conn.createStatement().execute(dml);
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery(SELECT lat FROM 
test);
+assertTrue(rs.next());
+assertEquals(-1.0f, rs.getFloat(1), 0.001);
+}
+
+@Test
+public void testFloatingPointMultiplicationUpsert() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+String ddl = CREATE TABLE test (id VARCHAR not null primary key, name 
VARCHAR, lat FLOAT);
+conn.createStatement().execute(ddl);
+String dml = UPSERT INTO test(id,name,lat) VALUES ('testid', 
'testname', -1.00 * 1);
+conn.createStatement().execute(dml);
+conn.commit();
+
+ResultSet rs = conn.createStatement().executeQuery(SELECT lat FROM 
test);
+assertTrue(rs.next());
+assertEquals(-1.0f, rs.getFloat(1), 0.001);
+}
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/24ee2c66/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java 
b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
index 931f327..eb1768c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ParseNodeFactory.java
@@ -47,6 +47,7 @@ import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TypeMismatchException;
 import org.apache.phoenix.schema.stats.StatisticsCollectionScope;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.util.SchemaUtil;
 
@@ -577,7 +578,8 @@ public class ParseNodeFactory {
 
 public ParseNode negate(ParseNode child) {
 // Prevents reparsing of -1 from becoming 1*-1 and 1*1*-1 with each 
re-parsing
-if (LiteralParseNode.ONE.equals(child)) {
+if (LiteralParseNode.ONE.equals(child)  
((LiteralParseNode)child).getType().isCoercibleTo(
+PLong.INSTANCE)) {
 return LiteralParseNode.MINUS_ONE;
 }
 return new 
MultiplyParseNode(Arrays.asList(child,LiteralParseNode.MINUS_ONE));



Git Push Summary

2015-05-20 Thread apurtell
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.x [deleted] 166425dba
  refs/heads/5.x-HBase-1.1 [deleted] bd974e7b7


  1   2   >