[phoenix] branch 4.x updated (e58b758 -> cd657db)

2020-11-17 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a change to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from e58b758  PHOENIX-6191: Creating a view which has its own new columns should also do checkAndPut checks on SYSTEM.MUTEX
 new 2169994  PHOENIX-6221 Getting CNF while creating transactional table with Omid(Rajeshbabu)
 new cd657db  fixing white spaces

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 phoenix-client-parent/pom.xml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
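
PHOENIX-6221 (first commit below) fixes a ClassNotFoundException (CNF) seen when creating transactional tables with Omid: the shaded phoenix-client jar did not carry the org/apache/omid/** classes, so the commit adds that pattern next to the existing org/apache/phoenix/** entry in the shading configuration of phoenix-client-parent/pom.xml. A minimal, hypothetical Java check (not part of the commit; the Omid class name is just an example) for confirming that Omid classes ended up on the client classpath:

    public class OmidShadeCheck {
        public static void main(String[] args) {
            try {
                // Any org.apache.omid class works here; HBaseTransactionManager is only an example.
                Class.forName("org.apache.omid.transaction.HBaseTransactionManager");
                System.out.println("Omid classes are present on the classpath");
            } catch (ClassNotFoundException e) {
                System.out.println("CNF: Omid classes are missing from the shaded client jar");
            }
        }
    }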



[phoenix] 01/02: PHOENIX-6221 Getting CNF while creating transactional table with Omid(Rajeshbabu)

2020-11-17 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 21699942f33f4ffbe6f1a40f170a2476c1f88016
Author: Rajeshbabu Chintaguntla 
AuthorDate: Sat Nov 14 02:34:07 2020 +0530

PHOENIX-6221 Getting CNF while creating transactional table with Omid(Rajeshbabu)
---
 phoenix-client-parent/pom.xml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/phoenix-client-parent/pom.xml b/phoenix-client-parent/pom.xml
index cabbf16..5b813ec 100644
--- a/phoenix-client-parent/pom.xml
+++ b/phoenix-client-parent/pom.xml
@@ -145,8 +145,9 @@
 
 org/apache/hbase/**
 
-org/apache/phoenix/**
-
+   org/apache/phoenix/**
+   org/apache/omid/**
+
   
 
 



[phoenix] 02/02: fixing white spaces

2020-11-17 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit cd657db52357aa73aabc707f8617cf09d9125d66
Author: Rajeshbabu Chintaguntla 
AuthorDate: Sat Nov 14 04:00:24 2020 +0530

fixing white spaces
---
 phoenix-client-parent/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/phoenix-client-parent/pom.xml b/phoenix-client-parent/pom.xml
index 5b813ec..0217e8a 100644
--- a/phoenix-client-parent/pom.xml
+++ b/phoenix-client-parent/pom.xml
@@ -145,8 +145,8 @@
 
 org/apache/hbase/**
 
-   org/apache/phoenix/**
-   org/apache/omid/**
+org/apache/phoenix/**
+org/apache/omid/**
 
   
 



[phoenix] branch master updated (1b5f13a -> 243ac64)

2020-11-16 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 1b5f13a  PHOENIX-6225 fix the dependency issue on the master branch
 add 4d110f2  PHOENIX-6221 Getting CNF while creating transactional table with Omid(Rajeshbabu)
 add 243ac64  fixing white spaces

No new revisions were added by this update.

Summary of changes:
 phoenix-client-parent/pom.xml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)



[phoenix] branch 4.x updated: PHOENIX-6130 StatementContext.subqueryResults should be thread safe

2020-09-14 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 75973b6  PHOENIX-6130 StatementContext.subqueryResults should be thread safe
75973b6 is described below

commit 75973b633db1c8ca62c518a3949ec842a9d98ad1
Author: Toshihiro Suzuki 
AuthorDate: Sun Sep 13 22:31:33 2020 +0900

PHOENIX-6130 StatementContext.subqueryResults should be thread safe
---
 .../apache/phoenix/end2end/ToCharFunctionIT.java   | 25 ++
 .../apache/phoenix/compile/StatementContext.java   |  2 +-
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToCharFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToCharFunctionIT.java
index 022197c..6632886 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToCharFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToCharFunctionIT.java
@@ -28,6 +28,7 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.sql.Time;
 import java.sql.Timestamp;
 import java.text.DateFormat;
@@ -266,4 +267,28 @@ public class ToCharFunctionIT extends ParallelStatsDisabledIT {
 }
 conn.close();
 }
+
+@Test
+public void testToChar100Times() throws Exception {
+String tableName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl());
+ Statement statement = conn.createStatement()) {
+conn.setAutoCommit(true);
+statement.execute("create table " + tableName +
+" (id varchar primary key, ts varchar)");
+statement.execute("upsert into " + tableName +
+" values ('id', '159606720')");
+String query = "select ts from " + tableName +
+" where ts <= (select to_char(" +
+"cast(to_number(to_date('2020-07-30 00:00:00')) as BIGINT), '#'))" +
+" and ts >= (select to_char(" +
+"cast(to_number(to_date('2020-07-29 00:00:00')) as BIGINT), '#'))";
+for (int i = 0; i < 100; i++) {
+try (ResultSet rs = statement.executeQuery(query)) {
+// The query should always return a result
+assertTrue(rs.next());
+}
+}
+}
+}
 }
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
index b477049..ea0c58e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
@@ -136,7 +136,7 @@ public class StatementContext {
 this.whereConditionColumns = new ArrayList>();
 this.dataColumns = this.currentTable == null ? Collections. emptyMap() : Maps
 . newLinkedHashMap();
-this.subqueryResults = Maps. newHashMap();
+this.subqueryResults = Maps. newConcurrentMap();
 this.readMetricsQueue = new ReadMetricQueue(isRequestMetricsEnabled,connection.getLogLevel());
 this.overAllQueryMetrics = new OverAllQueryMetrics(isRequestMetricsEnabled,connection.getLogLevel());
 this.retryingPersistentCache = Maps. newHashMap();
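
The one-line change above is the whole fix: the per-statement cache of subquery results moves from Guava's newHashMap() to newConcurrentMap(), since subqueries can be evaluated on parallel threads that write into the same map. A minimal standalone sketch of the same pattern (names are illustrative, not the Phoenix internals; Guava's Maps.newConcurrentMap() returns a ConcurrentHashMap, used directly here):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class SubqueryResultCache {
        // Safe for concurrent writers, unlike a plain HashMap.
        private final Map<String, Object> results = new ConcurrentHashMap<>();

        void put(String subquery, Object result) {
            results.putIfAbsent(subquery, result);
        }

        Object get(String subquery) {
            return results.get(subquery);
        }
    }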



[phoenix] branch master updated: PHOENIX-6130 StatementContext.subqueryResults should be thread safe

2020-09-14 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 11cc4cd  PHOENIX-6130 StatementContext.subqueryResults should be thread safe
 new da573bb  Merge pull request #879 from brfrn169/PHOENIX-6130
11cc4cd is described below

commit 11cc4cd0231d720dc2cd9ea4b214f79e069eb2fd
Author: Toshihiro Suzuki 
AuthorDate: Sun Sep 13 22:31:33 2020 +0900

PHOENIX-6130 StatementContext.subqueryResults should be thread safe
---
 .../apache/phoenix/end2end/ToCharFunctionIT.java   | 25 ++
 .../apache/phoenix/compile/StatementContext.java   |  2 +-
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToCharFunctionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToCharFunctionIT.java
index 022197c..6632886 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToCharFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ToCharFunctionIT.java
@@ -28,6 +28,7 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.sql.Time;
 import java.sql.Timestamp;
 import java.text.DateFormat;
@@ -266,4 +267,28 @@ public class ToCharFunctionIT extends ParallelStatsDisabledIT {
 }
 conn.close();
 }
+
+@Test
+public void testToChar100Times() throws Exception {
+String tableName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl());
+ Statement statement = conn.createStatement()) {
+conn.setAutoCommit(true);
+statement.execute("create table " + tableName +
+" (id varchar primary key, ts varchar)");
+statement.execute("upsert into " + tableName +
+" values ('id', '159606720')");
+String query = "select ts from " + tableName +
+" where ts <= (select to_char(" +
+"cast(to_number(to_date('2020-07-30 00:00:00')) as BIGINT), '#'))" +
+" and ts >= (select to_char(" +
+"cast(to_number(to_date('2020-07-29 00:00:00')) as BIGINT), '#'))";
+for (int i = 0; i < 100; i++) {
+try (ResultSet rs = statement.executeQuery(query)) {
+// The query should always return a result
+assertTrue(rs.next());
+}
+}
+}
+}
 }
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
index a4f7198..7d49085 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
@@ -140,7 +140,7 @@ public class StatementContext {
 this.whereConditionColumns = new ArrayList>();
 this.dataColumns = this.currentTable == null ? Collections. emptyMap() : Maps
 . newLinkedHashMap();
-this.subqueryResults = Maps. newHashMap();
+this.subqueryResults = Maps. newConcurrentMap();
 this.readMetricsQueue = new ReadMetricQueue(isRequestMetricsEnabled,connection.getLogLevel());
 this.overAllQueryMetrics = new OverAllQueryMetrics(isRequestMetricsEnabled,connection.getLogLevel());
 this.retryingPersistentCache = Maps. newHashMap();



[phoenix-omid] branch master updated: OMID-159 Replace default hbase commit table and timestamp modules in server configurations as for the new package structure(Rajeshbabu)

2020-09-14 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix-omid.git


The following commit(s) were added to refs/heads/master by this push:
 new 8f57727  OMID-159 Replace default hbase commit table and timestamp modules in server configurations as for the new package structure(Rajeshbabu)
 new c52d816  Merge pull request #67 from chrajeshbabu/master
8f57727 is described below

commit 8f57727486c16ed30781af65611252e9f4229d0a
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon Sep 14 11:49:49 2020 +0530

OMID-159 Replace default hbase commit table and timestamp modules in server configurations as for the new package structure(Rajeshbabu)
---
 .../src/main/resources/default-omid-server-configuration.yml | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/tso-server/src/main/resources/default-omid-server-configuration.yml b/tso-server/src/main/resources/default-omid-server-configuration.yml
index 3129904..647dbfa 100644
--- a/tso-server/src/main/resources/default-omid-server-configuration.yml
+++ b/tso-server/src/main/resources/default-omid-server-configuration.yml
@@ -112,13 +112,13 @@ monitorContext: false
 # Configuration WITHOUT High Availability using HBase for all required storage & reporting metrics to CSV files
 # -
 #
-# commitTableStoreModule: !!org.apache.omid.tso.DefaultHBaseCommitTableStorageModule [ ]
+# commitTableStoreModule: !!org.apache.omid.committable.hbase.DefaultHBaseCommitTableStorageModule [ ]
 # See optional params
 # - tableName
 # - familyName
 # - principal
 # - keytab
-# timestampStoreModule: !!org.apache.omid.tso.DefaultHBaseTimestampStorageModule [ ]
+# timestampStoreModule: !!org.apache.omid.timestamp.storage.DefaultHBaseTimestampStorageModule [ ]
 # See optional params
 # - tableName
 # - familyName
@@ -139,8 +139,8 @@ monitorContext: false
 # Configuration WITHOUT High Availability using ZK to store the timestamps & reporting metrics to console every 30 secs
 # -
 #
-# commitTableStoreModule: !!org.apache.omid.tso.DefaultHBaseCommitTableStorageModule [ ]
-# timestampStoreModule: !!org.apache.omid.tso.DefaultZKTimestampStorageModule
+# commitTableStoreModule: !!org.apache.omid.committable.hbase.DefaultHBaseCommitTableStorageModule [ ]
+# timestampStoreModule: !!org.apache.omid.timestamp.storage.DefaultZKTimestampStorageModule
 # zkCluster: "localhost:2181"
 # namespace: "omid"
 # leaseModule: !!org.apache.omid.tso.VoidLeaseManagementModule [ ]
@@ -158,8 +158,8 @@ monitorContext: false
 # Configuration WITH High Availability using HBase for all required storage and no metrics reports
 # -
 #
-# commitTableStoreModule: !!org.apache.omid.tso.DefaultHBaseCommitTableStorageModule [ ]
-# timestampStoreModule: !!org.apache.omid.tso.DefaultHBaseTimestampStorageModule [ ]
+# commitTableStoreModule: !!org.apache.omid.committable.hbase.DefaultHBaseCommitTableStorageModule [ ]
+# timestampStoreModule: !!org.apache.omid.timestamp.storage.DefaultHBaseTimestampStorageModule [ ]
 # leaseModule: !!org.apache.omid.tso.HALeaseManagementModule
 # leasePeriodInMs: 1
 # tsoLeasePath: "/tso-lease"



[phoenix] branch master updated: PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)-made test case works with all 2.x profiles

2020-06-17 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new b15468e  PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)-made test case works with all 2.x profiles
b15468e is described below

commit b15468e6784a0626bd140ed0c95b317d96efd052
Author: Rajeshbabu Chintaguntla 
AuthorDate: Thu Jun 18 06:30:51 2020 +0530

PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)-made test case works with all 2.x profiles
---
 .../apache/phoenix/end2end/BasePermissionsIT.java  | 67 +-
 1 file changed, 40 insertions(+), 27 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index a211030..888f24f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -36,9 +36,9 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessControlClient;
 import org.apache.hadoop.hbase.security.access.AccessControlUtil;
 import org.apache.hadoop.hbase.security.access.AccessController;
-import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.UserPermission;
+import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -67,6 +67,7 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
@@ -1436,45 +1437,57 @@ public abstract class BasePermissionsIT extends BaseTest {
 
 public static class  CustomAccessController extends AccessController {
 
-org.apache.hadoop.hbase.client.Connection connection;
+Configuration configuration;
+boolean aclRegion;
 @Override
 public void start(CoprocessorEnvironment env) throws IOException {
 super.start(env);
- connection = ConnectionFactory.createConnection(env.getConfiguration());
+configuration = env.getConfiguration();
+if(env instanceof RegionCoprocessorEnvironment) {
+aclRegion = AccessControlClient.ACL_TABLE_NAME.
+equals(((RegionCoprocessorEnvironment) env).getRegion().getTableDescriptor().getTableName());
+}
 }
 
 @Override
 public void getUserPermissions(RpcController controller,
 AccessControlProtos.GetUserPermissionsRequest request,
 RpcCallback done) {
+if(aclRegion) {
+super.getUserPermissions(controller,request,done);
+return;
+}
 AccessControlProtos.GetUserPermissionsResponse response = null;
+org.apache.hadoop.hbase.client.Connection connection;
+try {
+connection = ConnectionFactory.createConnection(configuration);
+} catch (IOException e) {
+// pass exception back up
+ResponseConverter.setControllerException(controller, new IOException(e));
+return;
+}
 try {
-final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null;
-final String namespace =
-request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null;
-final TableName table =
-request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null;
-final byte[] cf =
-request.hasColumnFamily

[phoenix] branch 4.x updated: PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)

2020-06-17 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 2d4ce75  PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)
2d4ce75 is described below

commit 2d4ce75ca23730607f80fc66bbc8388e463b52e2
Author: Rajeshbabu Chintaguntla 
AuthorDate: Thu Jun 18 05:43:28 2020 +0530

PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)
---
 .../apache/phoenix/end2end/BasePermissionsIT.java  | 109 ++---
 ...sionNSDisabledWithCustomAccessControllerIT.java |  32 ++
 ...ssionNSEnabledWithCustomAccessControllerIT.java |  32 ++
 .../coprocessor/PhoenixAccessController.java   |  22 ++---
 4 files changed, 172 insertions(+), 23 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index ffa724f..2338ada 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -18,17 +18,26 @@ package org.apache.phoenix.end2end;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Throwables;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessControlClient;
+import org.apache.hadoop.hbase.security.access.AccessController;
 import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -38,7 +47,6 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
 import org.apache.phoenix.schema.TableNotFoundException;
-import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Before;
@@ -58,6 +66,7 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
@@ -65,7 +74,12 @@ import java.util.List;
 import java.util.Properties;
 import java.util.Set;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 @Category(NeedsOwnMiniClusterTest.class)
 @FixMethodOrder(MethodSorters.NAME_ASCENDING)
@@ -137,6 +151,10 @@ public abstract class BasePermissionsIT extends BaseTest {
 }
 
 static void initCluster(boolean isNamespaceMapped) throws Exception {
+initCluster(isNamespaceMapped, false);
+}
+
+static void initCluster(boolean isNamespaceMapped, boolean useCustomAccessController) throws Exception {
 if (null != testUtil) {
 testUtil.shutdownMiniCluster();
 testUtil = null;
@@ -145,7 +163,7 @@ public abstract class BasePermissionsIT extends BaseTest {
 testUtil = new HBaseTestingUtility();
 
 Configuration config = testUtil.getConfiguration();
-enablePhoenixHBaseAuthorization(config);
+enablePhoenixHBaseAuthorization(config, useCustomAccessController);
 configureNamespacesOnServer(config, isNamespaceMapped);
 config.setInt(HConstants.MASTER_INFO_PORT, -1);
 
@@ -184,17 +202,26 @@ public abstract class BasePermissionsIT extends BaseTest {
 view2TableName = tableName + &quo

[phoenix] branch master updated: PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service-addendum(Rajeshbabu), PHOENIX-5700 PhoenixAccessController does not copy permissions from custom AccessControllers, PHOENIX-5908 Add test cases to verify Phoenix Access Controller with Custom Access Controller Service

2020-06-04 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 73277b1  PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service-addendum(Rajeshbabu),PHOENIX-5700 PhoenixAccessController does not copy permissions from custom AccessControllers,PHOENIX-5908 Add test cases to verify Phoenix Access Controller with Custom Access Controller Service
73277b1 is described below

commit 73277b153e80ab0b187055ac8bf82e24f3baa770
Author: Rajeshbabu Chintaguntla 
AuthorDate: Tue Jun 2 21:49:47 2020 +0530

PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service-addendum(Rajeshbabu),PHOENIX-5700 PhoenixAccessController does not copy permissions from custom AccessControllers,PHOENIX-5908 Add test cases to verify Phoenix Access Controller with Custom Access Controller Service
---
 .../apache/phoenix/end2end/BasePermissionsIT.java  | 130 -
 ...sionNSDisabledWithCustomAccessControllerIT.java |  32 +
 ...ssionNSEnabledWithCustomAccessControllerIT.java |  32 +
 .../coprocessor/PhoenixAccessController.java   |  11 +-
 4 files changed, 172 insertions(+), 33 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index f2a6b9d..a211030 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -18,34 +18,27 @@ package org.apache.phoenix.end2end;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Throwables;
-
-import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.PrivilegedExceptionAction;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Properties;
-import java.util.Set;
-
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessControlClient;
+import org.apache.hadoop.hbase.security.access.AccessControlUtil;
+import org.apache.hadoop.hbase.security.access.AccessController;
+import org.apache.hadoop.hbase.security.access.GetUserPermissionsRequest;
 import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -55,7 +48,6 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
 import org.apache.phoenix.schema.TableNotFoundException;
-import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Before;
@@ -66,7 +58,28 @@ import org.junit.runners.MethodSorters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.junit.Assert.*;
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static

[phoenix] branch 4.x updated: PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)

2020-05-20 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new e7ec48d  PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)
e7ec48d is described below

commit e7ec48db14ee774242df4a905f75e40c8c22f492
Author: Rajeshbabu Chintaguntla 
AuthorDate: Thu May 21 02:18:25 2020 +0530

PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)
---
 .../coprocessor/PhoenixAccessController.java   | 39 ++
 1 file changed, 25 insertions(+), 14 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 78343d0..1902490 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -17,16 +17,8 @@
  */
 package org.apache.phoenix.coprocessor;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-
+import com.google.protobuf.ByteString;
+import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
@@ -37,7 +29,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.coprocessor.BaseMasterAndRegionObserver;
@@ -46,6 +37,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.ipc.RpcUtil;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
@@ -54,8 +46,14 @@ import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.*;
+import org.apache.hadoop.hbase.security.access.AccessChecker;
+import org.apache.hadoop.hbase.security.access.AccessControlClient;
+import org.apache.hadoop.hbase.security.access.AccessControlConstants;
+import org.apache.hadoop.hbase.security.access.AuthResult;
+import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.access.TableAuthManager;
+import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.phoenix.compat.hbase.CompatObserverContext;
@@ -69,8 +67,15 @@ import org.apache.phoenix.util.MetaDataUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.protobuf.ByteString;
-import com.google.protobuf.RpcCallback;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
 
 public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
 
@@ -472,7 +477,10 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
 @Override
 public List run() throws Exception {
 final List userPermissions = new ArrayList();
+final RpcServer.Call rpcContext = RpcUtil.getRpcContext();
 try (Connection connection = ConnectionFactory.createConnection(env.getConfiguration())) {
+// Setting RPC context as null so that user can be resetted
+RpcUtil.setRpcContext(null);
 // Merge permissions from all accessController coprocessors loaded in memory
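
The hunk above is the core of the fix: the coprocessor remembers the current RPC call context, clears it so the server-side HBase connection it opens runs as the service (hbase) user rather than as the remote caller, and then merges permissions from the loaded AccessController coprocessors. The email is truncated here; a minimal sketch of that save/clear pattern, assuming the original context is restored in a finally block (the restore is not visible in the truncated diff):

    // Sketch only; mirrors the RpcUtil calls shown in the hunk above.
    RpcServer.Call rpcContext = RpcUtil.getRpcContext();   // remember the caller's context
    try (Connection connection = ConnectionFactory.createConnection(env.getConfiguration())) {
        RpcUtil.setRpcContext(null);                       // run the lookup as the hbase service user
        // ... fetch and merge user permissions via AccessControlClient ...
    } finally {
        RpcUtil.setRpcContext(rpcContext);                 // assumed: put the caller context back
    }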

[phoenix] branch master updated: PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)

2020-05-20 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new aad5836  PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)
aad5836 is described below

commit aad583670ea821286ab5e2460ce0ab7255d474c4
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed May 20 21:50:22 2020 +0530

PHOENIX-5905 Reset user to hbase by changing rpc context before getting user permissions on access controller service(Rajeshbabu)
---
 .../coprocessor/PhoenixAccessController.java   | 47 +-
 1 file changed, 29 insertions(+), 18 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 609f2d1..2754e84 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -17,24 +17,15 @@
  */
 package org.apache.phoenix.coprocessor;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-
+import com.google.protobuf.ByteString;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -47,7 +38,9 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.ObserverContextImpl;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.ipc.RpcCall;
 import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.ipc.RpcUtil;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
@@ -56,9 +49,14 @@ import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.*;
+import org.apache.hadoop.hbase.security.access.AccessChecker;
+import org.apache.hadoop.hbase.security.access.AccessControlClient;
+import org.apache.hadoop.hbase.security.access.AccessControlConstants;
+import org.apache.hadoop.hbase.security.access.AccessControlUtil;
+import org.apache.hadoop.hbase.security.access.AuthResult;
+import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.phoenix.compat.hbase.CompatPermissionUtil;
 import org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
@@ -71,13 +69,20 @@ import org.apache.phoenix.util.MetaDataUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.protobuf.ByteString;
-import com.google.protobuf.RpcCallback;
-import com.google.protobuf.RpcController;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
 
 import static org.apache.phoenix.compat.hbase.CompatPermissionUtil.authorizeUserTable;
-import static org.apache.phoenix.compat.hbase.CompatPermissionUtil.getUserFromUP;
 import static org.apache.phoenix.compat.hbase.CompatPermissionUtil.getPermissionFromUP;
+import static org.apache.phoenix.compat.hbase.CompatPermissionUtil.getUserFromUP;
 
 public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
 
@@ -467,7 +472,10 @@ public

[phoenix] branch 4.x updated: PHOENIX-4753 Remove the need for users to have Write access to the Phoenix SYSTEM STATS TABLE to drop tables(Rajeshbabu)

2020-05-11 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 2152a41  PHOENIX-4753 Remove the need for users to have Write access to the Phoenix SYSTEM STATS TABLE to drop tables(Rajeshbabu)
2152a41 is described below

commit 2152a41bcefc5dbefd4355aa967d85f125c639d8
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon May 11 19:40:43 2020 +0530

PHOENIX-4753 Remove the need for users to have Write access to the Phoenix SYSTEM STATS TABLE to drop tables(Rajeshbabu)
---
 .../apache/phoenix/end2end/BasePermissionsIT.java  | 127 +++--
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  58 +-
 .../coprocessor/PhoenixAccessController.java   |  10 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  |  27 -
 .../java/org/apache/phoenix/util/MetaDataUtil.java |  61 ++
 5 files changed, 223 insertions(+), 60 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index 218c6b1..ffa724f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -38,6 +38,7 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Before;
@@ -64,11 +65,7 @@ import java.util.List;
 import java.util.Properties;
 import java.util.Set;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 @Category(NeedsOwnMiniClusterTest.class)
 @FixMethodOrder(MethodSorters.NAME_ASCENDING)
@@ -207,6 +204,12 @@ public abstract class BasePermissionsIT extends BaseTest {
 conf.set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped));
 }
 
+private static void configureStatsConfigurations(Configuration conf) {
+conf.set(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
+conf.set(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
+conf.set(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, Long.toString(5));
+conf.set(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
+}
 public static HBaseTestingUtility getUtility(){
 return testUtil;
 }
@@ -383,13 +386,17 @@ public abstract class BasePermissionsIT extends BaseTest {
 }
 
 AccessTestAction createTable(final String tableName) throws SQLException {
+return createTable(tableName, NUM_RECORDS);
+}
+
+AccessTestAction createTable(final String tableName, final int numRecordsToInsert) throws SQLException {
 return new AccessTestAction() {
 @Override
 public Object run() throws Exception {
 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
 assertFalse(stmt.execute("CREATE TABLE " + tableName + "(pk INTEGER not null primary key, data VARCHAR, val integer)"));
 try (PreparedStatement pstmt = conn.prepareStatement("UPSERT INTO " + tableName + " values(?, ?, ?)")) {
-for (int i = 0; i < NUM_RECORDS; i++) {
+for (int i = 0; i < numRecordsToInsert; i++) {
 pstmt.setInt(1, i);
 pstmt.setString(2, Integer.toString(i));
 pstmt.setInt(3, i);
@@ -403,6 +410,19 @@ public abstract class BasePermissionsIT extends BaseTest {
 };
 }
 
+AccessTestAction updateStatsOnTable(final String tableName) throws SQLException {
+return new AccessTestAction() {
+@Override
+public Object run() throws Exception {
+try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
+assertFalse(stmt.execute("UPDATE STATISTICS " + tableName + " SET \""
++ QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\" = 5"));
+}
+return null;
+}
+};
+}
+
 private AccessTestAction createMultiTenantTable(final String tableName) throws SQLException {
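
The updateStatsOnTable action added above issues a Phoenix UPDATE STATISTICS statement through JDBC so the permissions tests can exercise stats writes before a table is dropped. A minimal standalone sketch of the same call, assuming a hypothetical connection URL and table name (the quoted property is the value behind QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class UpdateStatsExample {
        public static void main(String[] args) throws Exception {
            // Placeholder URL and table; substitute your cluster's ZooKeeper quorum and table name.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
                 Statement stmt = conn.createStatement()) {
                // Collect guideposts with a small width so stats rows are written quickly.
                stmt.execute("UPDATE STATISTICS MY_TABLE SET \"phoenix.stats.guidepost.width\" = 5");
            }
        }
    }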
 

[phoenix] branch master updated: PHOENIX-4753 Remove the need for users to have Write access to the Phoenix SYSTEM STATS TABLE to drop tables(Rajeshbabu)

2020-05-08 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 1740409  PHOENIX-4753 Remove the need for users to have Write access to the Phoenix SYSTEM STATS TABLE to drop tables(Rajeshbabu)
1740409 is described below

commit 1740409a56a621452b32771ca80d16b61f25c4a7
Author: Rajeshbabu Chintaguntla 
AuthorDate: Fri May 8 20:49:40 2020 +0530

PHOENIX-4753 Remove the need for users to have Write access to the Phoenix SYSTEM STATS TABLE to drop tables(Rajeshbabu)
---
 .../apache/phoenix/end2end/BasePermissionsIT.java  | 130 +++--
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  58 -
 .../phoenix/coprocessor/MetaDataProtocol.java  |   9 +-
 .../coprocessor/PhoenixAccessController.java   |  16 ++-
 .../org/apache/phoenix/schema/MetaDataClient.java  |  27 -
 .../java/org/apache/phoenix/util/MetaDataUtil.java |  61 ++
 6 files changed, 230 insertions(+), 71 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index f722c02..f2a6b9d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -19,12 +19,6 @@ package org.apache.phoenix.end2end;
 import com.google.common.base.Joiner;
 import com.google.common.base.Throwables;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.security.PrivilegedExceptionAction;
@@ -61,6 +55,7 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Before;
@@ -71,6 +66,8 @@ import org.junit.runners.MethodSorters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.junit.Assert.*;
+
 @Category(NeedsOwnMiniClusterTest.class)
 @FixMethodOrder(MethodSorters.NAME_ASCENDING)
 public abstract class BasePermissionsIT extends BaseTest {
@@ -151,6 +148,7 @@ public abstract class BasePermissionsIT extends BaseTest {
 Configuration config = testUtil.getConfiguration();
 enablePhoenixHBaseAuthorization(config);
 configureNamespacesOnServer(config, isNamespaceMapped);
+configureStatsConfigurations(config);
 config.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true);
 
 testUtil.startMiniCluster(1);
@@ -208,6 +206,12 @@ public abstract class BasePermissionsIT extends BaseTest {
 conf.set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped));
 }
 
+private static void configureStatsConfigurations(Configuration conf) {
+conf.set(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
+conf.set(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
+conf.set(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, Long.toString(5));
+conf.set(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
+}
 public static HBaseTestingUtility getUtility(){
 return testUtil;
 }
@@ -384,13 +388,17 @@ public abstract class BasePermissionsIT extends BaseTest {
 }
 
 AccessTestAction createTable(final String tableName) throws SQLException {
+return createTable(tableName, NUM_RECORDS);
+}
+
+AccessTestAction createTable(final String tableName, int numRecordsToInsert) throws SQLException {
 return new AccessTestAction() {
 @Override
 public Object run() throws Exception {
 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
 assertFalse(stmt.execute("CREATE TABLE " + tableName + "(pk INTEGER not null primary key, data VARCHAR, val integer)"));
 try (PreparedStatement pstmt = conn.prepareStatement("UPSERT INTO " + tableName + " values(?, ?, ?)")) {
-for (int i = 0; i < NUM_RECORDS; i++) {
+for (int i = 0; i < numRecordsToInsert; i++) {
 pstmt.setInt(1, i);
 pstmt.setString(2, Integer.toString(i));
 pstmt.setI

[phoenix] branch 4.x updated: PHOENIX-5794 Create a threshold for non async index creation, that can be modified in configs(Richard Antal)

2020-04-07 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new d880f32  PHOENIX-5794 Create a threshold for non async index creation, that can be modified in configs(Richard Antal)
d880f32 is described below

commit d880f32f1e111ff407fa3b383d1d0ea37dc19e5b
Author: Rajeshbabu 
AuthorDate: Tue Apr 7 19:42:08 2020 +0530

PHOENIX-5794 Create a threshold for non async index creation, that can be modified in configs(Richard Antal)
---
 .../end2end/index/IndexAsyncThresholdIT.java   | 207 +
 .../apache/phoenix/exception/SQLExceptionCode.java |   3 +
 .../org/apache/phoenix/query/QueryServices.java|   1 +
 .../apache/phoenix/query/QueryServicesOptions.java |   3 +
 .../org/apache/phoenix/schema/MetaDataClient.java  |  38 
 pom.xml|   4 +-
 6 files changed, 254 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexAsyncThresholdIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexAsyncThresholdIT.java
new file mode 100644
index 000..6cd2eaf
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexAsyncThresholdIT.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PMetaData;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.ServerUtil.ConnectionFactory;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.AfterParam;
+import org.junit.runners.Parameterized.BeforeParam;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotNull;
+
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class IndexAsyncThresholdIT extends BaseTest {
+
+private static final Logger logger = LoggerFactory.getLogger(IndexAsyncThresholdIT.class);
+
+private final String tableName;
+private final long rows;
+private final long columns;
+private final boolean overThreshold;
+private final Mode mode;
+
+enum Mode{
+NORMAL,
+ASYNC,
+COVERED,
+FUNCTIONAL
+}
+
+public IndexAsyncThresholdIT(Long threshold, Long rows, Long columns, Long overThreshold,
+ Long mode)
+throws Exception {
+this.tableName = generateUniqueName();
+this.rows = rows;
+this.columns = columns;
+this.overThreshold = overThreshold == 0;
+this.mode = mode.equals(0L) ? Mode.NORMAL :
+mode.equals(1L) ? Mode.ASYNC :
+mode.equals(2L) ? Mode.COVERED :
+Mode.FUNCTIONAL;
+}
+
+@Parameterized.Parameters
+public static synchronized Collection  primeNumbers() {
+return Arrays.asList(new Long[][]{
+
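
The test above is truncated by the archive. PHOENIX-5794 adds a configurable threshold above which a synchronous CREATE INDEX is rejected (note the new SQLExceptionCode entry in the summary of changes), nudging large index builds onto the ASYNC path. A minimal JDBC sketch of the two statements the threshold distinguishes, assuming a hypothetical connection URL and an existing table MY_TABLE with column VAL:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class IndexCreationExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
                 Statement stmt = conn.createStatement()) {
                // Built inline by the client; this is the path the new threshold can reject.
                stmt.execute("CREATE INDEX IDX_SYNC ON MY_TABLE (VAL)");
                // Deferred build; the index is populated later, e.g. by the IndexTool MapReduce job.
                stmt.execute("CREATE INDEX IDX_ASYNC ON MY_TABLE (VAL) ASYNC");
            }
        }
    }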

[phoenix] branch master updated: PHOENIX-5794 Create a threshold for non async index creation, that can be modified in configs(Richard Antal)

2020-04-06 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new b349787  PHOENIX-5794 Create a threshold for non async index creation, that can be modified in configs(Richard Antal)
b349787 is described below

commit b3497876a404828c6486ffc1e2e1957a007abeb6
Author: Rajeshbabu 
AuthorDate: Tue Apr 7 02:27:00 2020 +0530

PHOENIX-5794 Create a threshold for non async index creation, that can be modified in configs(Richard Antal)
---
 .../end2end/index/IndexAsyncThresholdIT.java   | 207 +
 .../apache/phoenix/exception/SQLExceptionCode.java |   3 +
 .../org/apache/phoenix/query/QueryServices.java|   1 +
 .../apache/phoenix/query/QueryServicesOptions.java |   3 +
 .../org/apache/phoenix/schema/MetaDataClient.java  |  40 
 pom.xml|   2 +-
 6 files changed, 255 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexAsyncThresholdIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexAsyncThresholdIT.java
new file mode 100644
index 000..6cd2eaf
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexAsyncThresholdIT.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PMetaData;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.ServerUtil.ConnectionFactory;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.AfterParam;
+import org.junit.runners.Parameterized.BeforeParam;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotNull;
+
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class IndexAsyncThresholdIT extends BaseTest {
+
+private static final Logger logger = LoggerFactory.getLogger(IndexAsyncThresholdIT.class);
+
+private final String tableName;
+private final long rows;
+private final long columns;
+private final boolean overThreshold;
+private final Mode mode;
+
+enum Mode{
+NORMAL,
+ASYNC,
+COVERED,
+FUNCTIONAL
+}
+
+public IndexAsyncThresholdIT(Long threshold, Long rows, Long columns, Long overThreshold,
+ Long mode)
+throws Exception {
+this.tableName = generateUniqueName();
+this.rows = rows;
+this.columns = columns;
+this.overThreshold = overThreshold == 0;
+this.mode = mode.equals(0L) ? Mode.NORMAL :
+mode.equals(1L) ? Mode.ASYNC :
+mode.equals(2L) ? Mode.COVERED :
+Mode.FUNCTIONAL;
+}
+
+@Parameterized.Parameters
+public static synchronized Collection  primeNumbers() {
+return Arrays.asList(new Long[][]{
+   
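
For context on what this parameterized test is driving at, a small JDBC sketch of the index-creation mode it compares against the synchronous path is given here. This is not part of the commit; the JDBC URL, table and index names are assumptions for illustration. A plain CREATE INDEX builds synchronously, while CREATE INDEX ... ASYNC only registers the index and leaves it in BUILDING state until IndexTool runs.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class AsyncIndexSketch {
    public static void main(String[] args) throws Exception {
        // Assumed local quorum; adjust the URL for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS DEMO_TABLE (ID BIGINT PRIMARY KEY, V1 VARCHAR)");
            // ASYNC only registers the index; the actual build is deferred to IndexTool.
            stmt.execute("CREATE INDEX DEMO_IDX ON DEMO_TABLE (V1) ASYNC");
            // The recorded state is visible on the index header row in SYSTEM.CATALOG.
            try (ResultSet rs = stmt.executeQuery(
                    "SELECT INDEX_STATE FROM SYSTEM.CATALOG "
                    + "WHERE TABLE_NAME = 'DEMO_IDX' AND INDEX_STATE IS NOT NULL")) {
                while (rs.next()) {
                    System.out.println("index state code: " + rs.getString(1));
                }
            }
        }
    }
}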

[phoenix] branch 4.x updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)-addendum

2020-03-13 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
 new 24be0d5  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for different table.(Rajeshbabu)-addendum
24be0d5 is described below

commit 24be0d5239b81dd95ae076bb6909be7fe0143c27
Author: Rajeshbabu Chintaguntla 
AuthorDate: Fri Mar 13 17:53:08 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for different table.(Rajeshbabu)-addendum
---
 .../phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 4f21511..0547a62 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -528,11 +528,13 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete ||
-(isUpsert && (targetHTable == null ||
-
targetHTable.getName().equals(region.getTableDesc().getTableName(
+if (isDescRowKeyOrderUpgrade || isDelete || isUpsert
 || (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
+if((isUpsert && (targetHTable == null ||
+
!targetHTable.getName().equals(region.getTableDesc().getTableName() {
+needToWrite = false;
+}
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));
 maxBatchSizeBytes = conf.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)-addendum

2020-03-13 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 17a796a  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for different table.(Rajeshbabu)-addendum
17a796a is described below

commit 17a796aa9f1f4048a17b59fb9114cb45f791c742
Author: Rajeshbabu Chintaguntla 
AuthorDate: Fri Mar 13 17:23:06 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for different table.(Rajeshbabu)-addendum
---
 .../phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 4f21511..bd44634 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -528,11 +528,13 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete ||
-(isUpsert && (targetHTable == null ||
-
targetHTable.getName().equals(region.getTableDesc().getTableName(
+if (isDescRowKeyOrderUpgrade || isDelete || isUpsert
 || (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
+if(isUpsert && (targetHTable == null ||
+
!targetHTable.getName().equals(region.getTableDesc().getTableName( {
+needToWrite = false;
+}
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));
 maxBatchSizeBytes = conf.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,



[phoenix] branch master updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)-addendum

2020-03-13 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 0442ccf  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for different table.(Rajeshbabu)-addendum
0442ccf is described below

commit 0442ccf03dd3932b7a178149ef65bfed09a1cb21
Author: Rajeshbabu Chintaguntla 
AuthorDate: Fri Mar 13 15:06:05 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for different table.(Rajeshbabu)-addendum
---
 .../phoenix/coprocessor/UngroupedAggregateRegionObserver.java  | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index d96956c..566fb59 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -565,11 +565,13 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete ||
-(isUpsert && (targetHTable == null ||
-
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
-|| (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
+if (isDescRowKeyOrderUpgrade || isDelete || isUpsert ||
+(deleteCQ != null && deleteCF != null) || emptyCF != null || 
buildLocalIndex) {
 needToWrite = true;
+if(isUpsert && (targetHTable == null ||
+
!targetHTable.getName().equals(region.getTableDescriptor().getTableName( {
+needToWrite = false;
+}
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));
 maxBatchSizeBytes = conf.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table-addendum.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 07fba15  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for different table-addendum.(Rajeshbabu)
07fba15 is described below

commit 07fba1545f5b1887a1c1de70ca79226cad183b61
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:45:28 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for different table-addendum.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index ea5cb91..e8a20dd 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -523,7 +523,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 if (isDescRowKeyOrderUpgrade || isDelete ||
 (isUpsert && (targetHTable == null ||
-
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+
targetHTable.getName().equals(region.getTableDesc().getTableName(
 || (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table-addendum.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 18c9594  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for different table-addendum.(Rajeshbabu)
18c9594 is described below

commit 18c959433cb6dc7a73aaba7925919cfeb0a33f1f
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:45:28 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for different table-addendum.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index e1962a6..4f21511 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -530,7 +530,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 if (isDescRowKeyOrderUpgrade || isDelete ||
 (isUpsert && (targetHTable == null ||
-
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+
targetHTable.getName().equals(region.getTableDesc().getTableName(
 || (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table-addendum.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 0007e56  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for different table-addendum.(Rajeshbabu)
0007e56 is described below

commit 0007e56e2075ed1b12283df579bb40d915251247
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:45:28 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for different table-addendum.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index e1962a6..4f21511 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -530,7 +530,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 if (isDescRowKeyOrderUpgrade || isDelete ||
 (isUpsert && (targetHTable == null ||
-
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+
targetHTable.getName().equals(region.getTableDesc().getTableName(
 || (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 1348603  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for different table.(Rajeshbabu)
1348603 is described below

commit 13486032db982687e4d5c1a1ae7246263fd39998
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:07:50 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for different table.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 8c3ac26..ea5cb91 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -521,7 +521,10 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != 
null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
+if (isDescRowKeyOrderUpgrade || isDelete ||
+(isUpsert && (targetHTable == null ||
+
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+|| (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new ee84df2  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for different table.(Rajeshbabu)
ee84df2 is described below

commit ee84df2a58a5e8479d3d0319b84f301e04c97d9d
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:07:50 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for different table.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index fbd9d80..e1962a6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -528,7 +528,10 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != 
null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
+if (isDescRowKeyOrderUpgrade || isDelete ||
+(isUpsert && (targetHTable == null ||
+
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+|| (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 6c1a1c7  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for different table.(Rajeshbabu)
6c1a1c7 is described below

commit 6c1a1c731ee66b5859a161284b99c6395ee60e8c
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:07:50 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for different table.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index fbd9d80..e1962a6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -528,7 +528,10 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != 
null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
+if (isDescRowKeyOrderUpgrade || isDelete ||
+(isUpsert && (targetHTable == null ||
+
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+|| (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));



[phoenix] branch master updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new c80059f  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for different table.(Rajeshbabu)
c80059f is described below

commit c80059fa883288bcf774daef73329b8e0b2ba760
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:07:50 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for different table.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index b38e144..d96956c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -565,7 +565,10 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != 
null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
+if (isDescRowKeyOrderUpgrade || isDelete ||
+(isUpsert && (targetHTable == null ||
+
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+|| (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));
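
For reference, the client-side scenario this change (and the addendum above) is about, sketched as plain JDBC; the URL and table names are illustrative assumptions. In a cross-table UPSERT SELECT the source table is only scanned, so its regions should not take the write path that was blocking concurrent HFile bulkloads.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CrossTableUpsertSelectSketch {
    public static void main(String[] args) throws Exception {
        // Assumed local quorum; adjust the URL for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS SRC (ID BIGINT PRIMARY KEY, V1 VARCHAR)");
            stmt.execute("CREATE TABLE IF NOT EXISTS DST (ID BIGINT PRIMARY KEY, V1 VARCHAR)");
            // Cross-table UPSERT SELECT: SRC regions are read, all writes go to DST.
            stmt.execute("UPSERT INTO DST (ID, V1) SELECT ID, V1 FROM SRC");
            conn.commit();
        }
    }
}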



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5724 Use exec permission in Phoenix ACLs only when execute check enabled(Rajeshbabu)

2020-02-12 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new ad7fead  PHOENIX-5724 Use exec permission in Phoenix ACLs only when 
execute check enabled(Rajeshbabu)
ad7fead is described below

commit ad7feadbec76b9e4db38be443ea5f75c2c8d30df
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 12 23:54:56 2020 +0530

PHOENIX-5724 Use exec permission in Phoenix ACLs only when execute check 
enabled(Rajeshbabu)
---
 .../coprocessor/PhoenixAccessController.java   | 46 +++---
 1 file changed, 32 insertions(+), 14 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 0e7a094..15b4486 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -53,13 +53,8 @@ import 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.AccessControlClient;
-import org.apache.hadoop.hbase.security.access.AuthResult;
-import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.*;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
-import org.apache.hadoop.hbase.security.access.TableAuthManager;
-import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import 
org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
@@ -80,6 +75,7 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 private PhoenixMetaDataControllerEnvironment env;
 AtomicReference> accessControllers 
= new AtomicReference<>();
 private boolean hbaseAccessControllerEnabled = false;
+private boolean execPermissionsCheckEnabled;
 private boolean accessCheckEnabled;
 private AccessChecker accessChecker;
 private UserProvider userProvider;
@@ -123,6 +119,8 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 LOGGER.warn(
 "PhoenixAccessController has been loaded with 
authorization checks disabled.");
 }
+this.execPermissionsCheckEnabled = 
conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
+AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
 if (env instanceof PhoenixMetaDataControllerEnvironment) {
 this.env = (PhoenixMetaDataControllerEnvironment)env;
 } else {
@@ -180,12 +178,17 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 Set physicalTablesChecked = new HashSet();
 if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
 physicalTablesChecked.add(parentPhysicalTableName);
-requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ, Action.EXEC);
+if(execPermissionsCheckEnabled) {
+requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ, Action.EXEC);
+} else {
+requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ);
+}
 }
 
 if (tableType == PTableType.VIEW) {
-
-Action[] requiredActions = { Action.READ, Action.EXEC };
+
+Action[] requiredActions = execPermissionsCheckEnabled ?
+new Action[]{ Action.READ, Action.EXEC } : new Action[] { 
Action.READ};
 for (TableName index : indexes) {
 if (!physicalTablesChecked.add(index)) {
 // skip check for local index as we have already check the 
ACLs above
@@ -235,9 +238,12 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 // skip check for local index
 if (physicalTableName != null && 
!parentPhysicalTableName.equals(physicalTableName)
 && 
!MetaDataUtil.isViewIndex(physicalTableName.getNameAsString())) {
+List actions = Arrays.asList(Action.READ, 
Action.WRITE, Action.CREATE, Action.ADMIN);
+if(execPermissionsCheckEnabled) {
+actions.add(Action.EXEC);
+}
 authorizeOr
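
A small standalone sketch of the pattern this change introduces (not the PhoenixAccessController code itself): EXEC is demanded only when exec-permission checks are enabled. A mutable list is used here so the conditional append is safe.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.security.access.Permission.Action;

public class RequiredActionsSketch {

    static Action[] requiredActions(boolean execChecksEnabled, Action... base) {
        // Copy into a mutable list; Arrays.asList alone is fixed-size.
        List<Action> actions = new ArrayList<>(Arrays.asList(base));
        if (execChecksEnabled) {
            actions.add(Action.EXEC);
        }
        return actions.toArray(new Action[0]);
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(requiredActions(false, Action.READ)));  // [READ]
        System.out.println(Arrays.toString(requiredActions(true, Action.READ)));   // [READ, EXEC]
        System.out.println(Arrays.toString(
                requiredActions(true, Action.READ, Action.WRITE, Action.CREATE, Action.ADMIN)));
    }
}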

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5724 Use exec permission in Phoenix ACLs only when execute check enabled(Rajeshbabu)

2020-02-12 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new fd7b375  PHOENIX-5724 Use exec permission in Phoenix ACLs only when 
execute check enabled(Rajeshbabu)
fd7b375 is described below

commit fd7b375293c2e14852b4d23db9a2d98f6693496a
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 12 22:38:32 2020 +0530

PHOENIX-5724 Use exec permission in Phoenix ACLs only when execute check 
enabled(Rajeshbabu)
---
 .../coprocessor/PhoenixAccessController.java   | 46 +++---
 1 file changed, 32 insertions(+), 14 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 4603f1e..a4788b2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -54,13 +54,8 @@ import 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.AccessControlClient;
-import org.apache.hadoop.hbase.security.access.AuthResult;
-import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.*;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
-import org.apache.hadoop.hbase.security.access.TableAuthManager;
-import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import 
org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
@@ -82,6 +77,7 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 AtomicReference> accessControllers 
= new AtomicReference<>();
 private boolean accessCheckEnabled;
 private boolean hbaseAccessControllerEnabled;
+private boolean execPermissionsCheckEnabled;
 private UserProvider userProvider;
 private AccessChecker accessChecker;
 public static final Logger LOGGER = 
LoggerFactory.getLogger(PhoenixAccessController.class);
@@ -124,6 +120,8 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 LOGGER.warn(
 "PhoenixAccessController has been loaded with 
authorization checks disabled.");
 }
+this.execPermissionsCheckEnabled = 
conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
+AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
 if (env instanceof PhoenixMetaDataControllerEnvironment) {
 this.env = (PhoenixMetaDataControllerEnvironment)env;
 } else {
@@ -181,12 +179,17 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 Set physicalTablesChecked = new HashSet();
 if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
 physicalTablesChecked.add(parentPhysicalTableName);
-requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ, Action.EXEC);
+if(execPermissionsCheckEnabled) {
+requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ, Action.EXEC);
+} else {
+requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ);
+}
 }
 
 if (tableType == PTableType.VIEW) {
-
-Action[] requiredActions = { Action.READ, Action.EXEC };
+
+Action[] requiredActions = execPermissionsCheckEnabled ?
+new Action[]{ Action.READ, Action.EXEC } : new Action[] { 
Action.READ};
 for (TableName index : indexes) {
 if (!physicalTablesChecked.add(index)) {
 // skip check for local index as we have already check the 
ACLs above
@@ -236,9 +239,12 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 // skip check for local index
 if (physicalTableName != null && 
!parentPhysicalTableName.equals(physicalTableName)
 && 
!MetaDataUtil.isViewIndex(physicalTableName.getNameAsString())) {
+List actions = Arrays.asList(Action.READ, 
Action.WRITE, Action.CREATE, Action.ADMIN);
+if(execPermissionsCheckEnabled) {
+actions.add(Action.EXEC);
+}
  

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5724 Use exec permission in Phoenix ACLs only when execute check enabled(Rajeshbabu)

2020-02-12 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new d128960  PHOENIX-5724 Use exec permission in Phoenix ACLs only when 
execute check enabled(Rajeshbabu)
d128960 is described below

commit d12896058d6833d343d5889bc119dae43e7f3c32
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 12 22:38:32 2020 +0530

PHOENIX-5724 Use exec permission in Phoenix ACLs only when execute check 
enabled(Rajeshbabu)
---
 .../coprocessor/PhoenixAccessController.java   | 46 +++---
 1 file changed, 32 insertions(+), 14 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index c1113bc..83e9593 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -54,13 +54,8 @@ import 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.AccessControlClient;
-import org.apache.hadoop.hbase.security.access.AuthResult;
-import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.*;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
-import org.apache.hadoop.hbase.security.access.TableAuthManager;
-import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import 
org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
@@ -82,6 +77,7 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 AtomicReference> accessControllers 
= new AtomicReference<>();
 private boolean accessCheckEnabled;
 private boolean hbaseAccessControllerEnabled;
+private boolean execPermissionsCheckEnabled;
 private UserProvider userProvider;
 private AccessChecker accessChecker;
 public static final Logger LOGGER = 
LoggerFactory.getLogger(PhoenixAccessController.class);
@@ -124,6 +120,8 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 LOGGER.warn(
 "PhoenixAccessController has been loaded with 
authorization checks disabled.");
 }
+this.execPermissionsCheckEnabled = 
conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
+AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
 if (env instanceof PhoenixMetaDataControllerEnvironment) {
 this.env = (PhoenixMetaDataControllerEnvironment)env;
 } else {
@@ -181,12 +179,17 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 Set physicalTablesChecked = new HashSet();
 if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
 physicalTablesChecked.add(parentPhysicalTableName);
-requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ, Action.EXEC);
+if(execPermissionsCheckEnabled) {
+requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ, Action.EXEC);
+} else {
+requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ);
+}
 }
 
 if (tableType == PTableType.VIEW) {
-
-Action[] requiredActions = { Action.READ, Action.EXEC };
+
+Action[] requiredActions = execPermissionsCheckEnabled ?
+new Action[]{ Action.READ, Action.EXEC } : new Action[] { 
Action.READ};
 for (TableName index : indexes) {
 if (!physicalTablesChecked.add(index)) {
 // skip check for local index as we have already check the 
ACLs above
@@ -236,9 +239,12 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 // skip check for local index
 if (physicalTableName != null && 
!parentPhysicalTableName.equals(physicalTableName)
 && 
!MetaDataUtil.isViewIndex(physicalTableName.getNameAsString())) {
+List actions = Arrays.asList(Action.READ, 
Action.WRITE, Action.CREATE, Action.ADMIN);
+if(execPermissionsCheckEnabled) {
+actions.add(Action.EXEC);
+}
  

[phoenix] branch master updated: PHOENIX-5724 Use exec permission in Phoenix ACLs only when execute check enabled(Rajeshbabu)

2020-02-12 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new cf67e5d  PHOENIX-5724 Use exec permission in Phoenix ACLs only when 
execute check enabled(Rajeshbabu)
cf67e5d is described below

commit cf67e5d4a46183ba11c8c0547034f075b57b1ab0
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 12 22:22:24 2020 +0530

PHOENIX-5724 Use exec permission in Phoenix ACLs only when execute check 
enabled(Rajeshbabu)
---
 .../coprocessor/PhoenixAccessController.java   | 46 +++---
 1 file changed, 32 insertions(+), 14 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 8a7d285..2bb9011 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -56,13 +56,8 @@ import 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.AccessControlClient;
-import org.apache.hadoop.hbase.security.access.AccessControlUtil;
-import org.apache.hadoop.hbase.security.access.AuthResult;
-import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.*;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
-import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.phoenix.compat.hbase.CompatPermissionUtil;
@@ -90,6 +85,7 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 AtomicReference> accessControllers = new 
AtomicReference<>();
 private boolean hbaseAccessControllerEnabled;
 private boolean accessCheckEnabled;
+private boolean execPermissionsCheckEnabled;
 private UserProvider userProvider;
 private AccessChecker accessChecker;
 public static final Logger LOGGER = 
LoggerFactory.getLogger(PhoenixAccessController.class);
@@ -139,6 +135,8 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 LOGGER.warn(
 "PhoenixAccessController has been loaded with 
authorization checks disabled.");
 }
+this.execPermissionsCheckEnabled = 
conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
+AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
 if (env instanceof PhoenixMetaDataControllerEnvironment) {
 this.env = (PhoenixMetaDataControllerEnvironment)env;
 } else {
@@ -187,12 +185,17 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 Set physicalTablesChecked = new HashSet();
 if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
 physicalTablesChecked.add(parentPhysicalTableName);
-requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ, Action.EXEC);
+if(execPermissionsCheckEnabled) {
+requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ, Action.EXEC);
+} else {
+requireAccess("Create" + tableType, parentPhysicalTableName, 
Action.READ);
+}
 }
 
 if (tableType == PTableType.VIEW) {
-
-Action[] requiredActions = { Action.READ, Action.EXEC };
+
+Action[] requiredActions = execPermissionsCheckEnabled ?
+new Action[]{ Action.READ, Action.EXEC } : new Action[] { 
Action.READ};
 for (TableName index : indexes) {
 if (!physicalTablesChecked.add(index)) {
 // skip check for local index as we have already check the 
ACLs above
@@ -242,9 +245,12 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 // skip check for local index
 if (physicalTableName != null && 
!parentPhysicalTableName.equals(physicalTableName)
 && 
!MetaDataUtil.isViewIndex(physicalTableName.getNameAsString())) {
+List actions = Arrays.asList(Action.READ, 
Action.WRITE, Action.CREATE, Action.ADMIN);
+if(execPermissionsCheckEnabled) {
+actions.add(Action.EXEC);
+}
 authorizeOrGrantAccessToUsers("Create" + 
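
For completeness, a sketch of how the flag driving the behaviour above is read; the constants are the same ones used in this diff, and the effective value comes from the HBase configuration (hbase-site.xml) when present.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.access.AccessControlConstants;

public class ExecCheckFlagSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Falls back to DEFAULT_EXEC_PERMISSION_CHECKS when the property is not set.
        boolean execChecksEnabled = conf.getBoolean(
                AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
                AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
        System.out.println("exec permission checks enabled: " + execChecksEnabled);
    }
}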

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5691 create index is failing when phoenix acls enabled and ranger is enabled(Rajeshbabu)

2020-02-10 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new a58dae7  PHOENIX-5691 create index is failing when phoenix acls 
enabled and ranger is enabled(Rajeshbabu)
a58dae7 is described below

commit a58dae7eee1ca1c1eb298b170632da0575d00cbb
Author: Rajeshbabu Chintaguntla 
AuthorDate: Tue Feb 11 11:01:05 2020 +0530

PHOENIX-5691 create index is failing when phoenix acls enabled and ranger 
is enabled(Rajeshbabu)
---
 .../org/apache/phoenix/coprocessor/PhoenixAccessController.java   | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index d23c533..4603f1e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 
+import com.google.protobuf.RpcController;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -44,8 +45,8 @@ import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
@@ -489,8 +490,7 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 
 private void getUserPermsFromUserDefinedAccessController(final 
List userPermissions, Connection connection, 
AccessControlService.Interface service) {
 
-HBaseRpcController controller = ((ClusterConnection)connection)
-.getRpcControllerFactory().newController();
+ServerRpcController controller = new ServerRpcController();
 
 AccessControlProtos.GetUserPermissionsRequest.Builder 
builderTablePerms = AccessControlProtos.GetUserPermissionsRequest
 .newBuilder();
@@ -511,7 +511,7 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 }
 
 private void callGetUserPermissionsRequest(final 
List userPermissions, AccessControlService.Interface service
-, AccessControlProtos.GetUserPermissionsRequest request, 
HBaseRpcController controller) {
+, AccessControlProtos.GetUserPermissionsRequest request, 
RpcController controller) {
 service.getUserPermissions(controller, request,
 new 
RpcCallback() {
 @Override



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5691 create index is failing when phoenix acls enabled and ranger is enabled(Rajeshbabu)

2020-02-10 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 3939269  PHOENIX-5691 create index is failing when phoenix acls 
enabled and ranger is enabled(Rajeshbabu)
3939269 is described below

commit 39392697b8b9e216bb75fbbfea84d2c8b8210b2d
Author: Rajeshbabu Chintaguntla 
AuthorDate: Tue Feb 11 11:01:05 2020 +0530

PHOENIX-5691 create index is failing when phoenix acls enabled and ranger 
is enabled(Rajeshbabu)
---
 .../org/apache/phoenix/coprocessor/PhoenixAccessController.java   | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index f1198ee..c1113bc 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 
+import com.google.protobuf.RpcController;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -44,8 +45,8 @@ import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
@@ -489,8 +490,7 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 
 private void getUserPermsFromUserDefinedAccessController(final 
List userPermissions, Connection connection, 
AccessControlService.Interface service) {
 
-HBaseRpcController controller = ((ClusterConnection)connection)
-.getRpcControllerFactory().newController();
+ServerRpcController controller = new ServerRpcController();
 
 AccessControlProtos.GetUserPermissionsRequest.Builder 
builderTablePerms = AccessControlProtos.GetUserPermissionsRequest
 .newBuilder();
@@ -511,7 +511,7 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 }
 
 private void callGetUserPermissionsRequest(final 
List userPermissions, AccessControlService.Interface service
-, AccessControlProtos.GetUserPermissionsRequest request, 
HBaseRpcController controller) {
+, AccessControlProtos.GetUserPermissionsRequest request, 
RpcController controller) {
 service.getUserPermissions(controller, request,
 new 
RpcCallback() {
 @Override



[phoenix] branch master updated: PHOENIX-5471 MetaDataEndpointImplIT declares wrong package

2019-09-10 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 7d0af64  PHOENIX-5471 MetaDataEndpointImplIT declares wrong package
 new 1347302  Merge pull request #582 from stoty/PHOENIX-5471
7d0af64 is described below

commit 7d0af64187df82edb1bd516e4adf9455c2473896
Author: Istvan Toth 
AuthorDate: Tue Sep 10 14:21:59 2019 +0200

PHOENIX-5471 MetaDataEndpointImplIT declares wrong package

make declared package consistent with directory
---
 .../src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
index 61b039d..75af5f8 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
@@ -1,4 +1,4 @@
-package org.apache.phoenix.coprocessor;
+package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5136 Rows with null values inserted by UPSERT .. ON DUPLICATE KEY UPDATE are included in query results when they shouldn't be(Miles Spielberg)

2019-08-26 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 075d60f  PHOENIX-5136 Rows with null values inserted by UPSERT .. ON 
DUPLICATE KEY UPDATE are included in query results when they shouldn't be(Miles 
Spielberg)
075d60f is described below

commit 075d60f48898394f2d123d16d951a791fbfa28f4
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon Aug 26 22:06:22 2019 +0530

PHOENIX-5136 Rows with null values inserted by UPSERT .. ON DUPLICATE KEY 
UPDATE are included in query results when they shouldn't be(Miles Spielberg)
---
 .../apache/phoenix/end2end/OnDuplicateKeyIT.java   |  34 +++
 .../apache/phoenix/expression/AndExpression.java   |   2 +-
 .../apache/phoenix/expression/AndOrExpression.java |  25 +-
 .../phoenix/filter/BooleanExpressionFilter.java|   5 +-
 .../phoenix/expression/AndExpressionTest.java  | 297 +
 .../phoenix/expression/OrExpressionTest.java   | 293 
 6 files changed, 648 insertions(+), 8 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
index f1ee0e7..4782e57 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
@@ -580,6 +580,40 @@ public class OnDuplicateKeyIT extends 
ParallelStatsDisabledIT {
 }
 }
 
+@Test
+public void 
testRowsCreatedViaUpsertOnDuplicateKeyShouldNotBeReturnedInQueryIfNotMatched() 
throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+String tableName = generateUniqueName();
+String ddl = "create table " + tableName + "(pk varchar primary key, 
counter1 bigint, counter2 smallint)";
+conn.createStatement().execute(ddl);
+createIndex(conn, tableName);
+// The data has to be specifically starting with null for the first 
counter to fail the test. If you reverse the values, the test passes.
+String dml1 = "UPSERT INTO " + tableName + " VALUES('a',NULL,2) ON 
DUPLICATE KEY UPDATE " +
+"counter1 = CASE WHEN (counter1 IS NULL) THEN NULL ELSE 
counter1 END, " +
+"counter2 = CASE WHEN (counter1 IS NULL) THEN 2 ELSE counter2 
END";
+conn.createStatement().execute(dml1);
+conn.commit();
+
+String dml2 = "UPSERT INTO " + tableName + " VALUES('b',1,2) ON 
DUPLICATE KEY UPDATE " +
+"counter1 = CASE WHEN (counter1 IS NULL) THEN 1 ELSE counter1 
END, " +
+"counter2 = CASE WHEN (counter1 IS NULL) THEN 2 ELSE counter2 
END";
+conn.createStatement().execute(dml2);
+conn.commit();
+
+// Using this statement causes the test to pass
+//ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " 
+ tableName + " WHERE counter2 = 2 AND counter1 = 1");
+// This statement should be equivalent to the one above, but it 
selects both rows.
+ResultSet rs = conn.createStatement().executeQuery("SELECT pk, 
counter1, counter2 FROM " + tableName + " WHERE counter2 = 2 AND (counter1 = 1 
OR counter1 = 1)");
+assertTrue(rs.next());
+assertEquals("b",rs.getString(1));
+assertEquals(1,rs.getLong(2));
+assertEquals(2,rs.getLong(3));
+assertFalse(rs.next());
+
+conn.close();
+}
+
 
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
index 70e94ca..2aa1827 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
@@ -80,7 +80,7 @@ public class AndExpression extends AndOrExpression {
 
 @Override
 protected boolean isStopValue(Boolean value) {
-return !Boolean.TRUE.equals(value);
+return Boolean.FALSE.equals(value);
 }
 
 @Override
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
index ea8c375..07b07a2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.schema.types.
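
A plain-Java sketch (not Phoenix's AndOrExpression) of the three-valued AND the isStopValue change restores: only FALSE may short-circuit an AND, and an unknown (null) child must not be treated as a final answer. That is why a row upserted with a NULL counter1, as in the test above, must not satisfy the rewritten filter.

import java.util.Arrays;
import java.util.List;

public class ThreeValuedAndSketch {

    static Boolean and(List<Boolean> children) {
        boolean sawNull = false;
        for (Boolean child : children) {
            if (Boolean.FALSE.equals(child)) {
                return false;          // FALSE is the only stop value for AND
            }
            if (child == null) {
                sawNull = true;        // unknown: keep evaluating, remember it
            }
        }
        return sawNull ? null : true;
    }

    public static void main(String[] args) {
        System.out.println(and(Arrays.asList(true, true)));    // true
        System.out.println(and(Arrays.asList(null, false)));   // false
        System.out.println(and(Arrays.asList(null, true)));    // null (unknown), not true
    }
}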

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5136 Rows with null values inserted by UPSERT .. ON DUPLICATE KEY UPDATE are included in query results when they shouldn't be(Miles Spielberg)

2019-08-26 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new d50991e  PHOENIX-5136 Rows with null values inserted by UPSERT .. ON 
DUPLICATE KEY UPDATE are included in query results when they shouldn't be(Miles 
Spielberg)
d50991e is described below

commit d50991ec3606cadbd73e7c3c1c65a87ef7821cd4
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon Aug 26 22:02:56 2019 +0530

PHOENIX-5136 Rows with null values inserted by UPSERT .. ON DUPLICATE KEY 
UPDATE are included in query results when they shouldn't be(Miles Spielberg)
---
 .../apache/phoenix/end2end/OnDuplicateKeyIT.java   |  34 +++
 .../apache/phoenix/expression/AndExpression.java   |   2 +-
 .../apache/phoenix/expression/AndOrExpression.java |  25 +-
 .../phoenix/filter/BooleanExpressionFilter.java|   5 +-
 .../phoenix/expression/AndExpressionTest.java  | 297 +
 .../phoenix/expression/OrExpressionTest.java   | 293 
 6 files changed, 648 insertions(+), 8 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
index f1ee0e7..4782e57 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
@@ -580,6 +580,40 @@ public class OnDuplicateKeyIT extends 
ParallelStatsDisabledIT {
 }
 }
 
+@Test
+public void 
testRowsCreatedViaUpsertOnDuplicateKeyShouldNotBeReturnedInQueryIfNotMatched() 
throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+String tableName = generateUniqueName();
+String ddl = "create table " + tableName + "(pk varchar primary key, 
counter1 bigint, counter2 smallint)";
+conn.createStatement().execute(ddl);
+createIndex(conn, tableName);
+// The data has to be specifically starting with null for the first 
counter to fail the test. If you reverse the values, the test passes.
+String dml1 = "UPSERT INTO " + tableName + " VALUES('a',NULL,2) ON 
DUPLICATE KEY UPDATE " +
+"counter1 = CASE WHEN (counter1 IS NULL) THEN NULL ELSE 
counter1 END, " +
+"counter2 = CASE WHEN (counter1 IS NULL) THEN 2 ELSE counter2 
END";
+conn.createStatement().execute(dml1);
+conn.commit();
+
+String dml2 = "UPSERT INTO " + tableName + " VALUES('b',1,2) ON 
DUPLICATE KEY UPDATE " +
+"counter1 = CASE WHEN (counter1 IS NULL) THEN 1 ELSE counter1 
END, " +
+"counter2 = CASE WHEN (counter1 IS NULL) THEN 2 ELSE counter2 
END";
+conn.createStatement().execute(dml2);
+conn.commit();
+
+// Using this statement causes the test to pass
+//ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " 
+ tableName + " WHERE counter2 = 2 AND counter1 = 1");
+// This statement should be equivalent to the one above, but it 
selects both rows.
+ResultSet rs = conn.createStatement().executeQuery("SELECT pk, 
counter1, counter2 FROM " + tableName + " WHERE counter2 = 2 AND (counter1 = 1 
OR counter1 = 1)");
+assertTrue(rs.next());
+assertEquals("b",rs.getString(1));
+assertEquals(1,rs.getLong(2));
+assertEquals(2,rs.getLong(3));
+assertFalse(rs.next());
+
+conn.close();
+}
+
 
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
index 70e94ca..2aa1827 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
@@ -80,7 +80,7 @@ public class AndExpression extends AndOrExpression {
 
 @Override
 protected boolean isStopValue(Boolean value) {
-return !Boolean.TRUE.equals(value);
+return Boolean.FALSE.equals(value);
 }
 
 @Override
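
The one-line change above is the heart of the fix: under SQL's three-valued logic only FALSE can short-circuit an AND, because a NULL (unknown) operand may still combine with the other operand to yield FALSE or NULL. The old "not TRUE" check wrongly let NULL act as a stop value. A minimal sketch of the rule in plain Java (illustrative names, not Phoenix's API):

    // Illustrative sketch, not Phoenix code: three-valued AND over TRUE / FALSE / null (unknown).
    class ThreeValuedAndSketch {
        static Boolean and3(Boolean a, Boolean b) {
            if (Boolean.FALSE.equals(a) || Boolean.FALSE.equals(b)) return Boolean.FALSE; // FALSE dominates
            if (a == null || b == null) return null; // otherwise unknown if either side is unknown
            return Boolean.TRUE;
        }

        // Only FALSE may stop AND evaluation early; NULL must not.
        static boolean isStopValueForAnd(Boolean value) {
            return Boolean.FALSE.equals(value);
        }
    }

For AND, FALSE is the only dominating value; both TRUE and NULL still require looking at the remaining operands.
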
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
index ea8c375..07b07a2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.schema.types.

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5136 Rows with null values inserted by UPSERT .. ON DUPLICATE KEY UPDATE are included in query results when they shouldn't be(Miles Spielberg)

2019-08-26 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new a7bddf5  PHOENIX-5136 Rows with null values inserted by UPSERT .. ON 
DUPLICATE KEY UPDATE are included in query results when they shouldn't be(Miles 
Spielberg)
a7bddf5 is described below

commit a7bddf530ab53ed41ff5263a86f5e9212628612d
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon Aug 26 22:00:56 2019 +0530

PHOENIX-5136 Rows with null values inserted by UPSERT .. ON DUPLICATE KEY 
UPDATE are included in query results when they shouldn't be(Miles Spielberg)
---
 .../apache/phoenix/end2end/OnDuplicateKeyIT.java   |  34 +++
 .../apache/phoenix/expression/AndExpression.java   |   2 +-
 .../apache/phoenix/expression/AndOrExpression.java |  25 +-
 .../phoenix/filter/BooleanExpressionFilter.java|   5 +-
 .../phoenix/expression/AndExpressionTest.java  | 297 +
 .../phoenix/expression/OrExpressionTest.java   | 293 
 6 files changed, 648 insertions(+), 8 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
index f1ee0e7..4782e57 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
@@ -580,6 +580,40 @@ public class OnDuplicateKeyIT extends 
ParallelStatsDisabledIT {
 }
 }
 
+@Test
+public void 
testRowsCreatedViaUpsertOnDuplicateKeyShouldNotBeReturnedInQueryIfNotMatched() 
throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+String tableName = generateUniqueName();
+String ddl = "create table " + tableName + "(pk varchar primary key, 
counter1 bigint, counter2 smallint)";
+conn.createStatement().execute(ddl);
+createIndex(conn, tableName);
+// The data has to be specifically starting with null for the first 
counter to fail the test. If you reverse the values, the test passes.
+String dml1 = "UPSERT INTO " + tableName + " VALUES('a',NULL,2) ON 
DUPLICATE KEY UPDATE " +
+"counter1 = CASE WHEN (counter1 IS NULL) THEN NULL ELSE 
counter1 END, " +
+"counter2 = CASE WHEN (counter1 IS NULL) THEN 2 ELSE counter2 
END";
+conn.createStatement().execute(dml1);
+conn.commit();
+
+String dml2 = "UPSERT INTO " + tableName + " VALUES('b',1,2) ON 
DUPLICATE KEY UPDATE " +
+"counter1 = CASE WHEN (counter1 IS NULL) THEN 1 ELSE counter1 
END, " +
+"counter2 = CASE WHEN (counter1 IS NULL) THEN 2 ELSE counter2 
END";
+conn.createStatement().execute(dml2);
+conn.commit();
+
+// Using this statement causes the test to pass
+//ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " 
+ tableName + " WHERE counter2 = 2 AND counter1 = 1");
+// This statement should be equivalent to the one above, but it 
selects both rows.
+ResultSet rs = conn.createStatement().executeQuery("SELECT pk, 
counter1, counter2 FROM " + tableName + " WHERE counter2 = 2 AND (counter1 = 1 
OR counter1 = 1)");
+assertTrue(rs.next());
+assertEquals("b",rs.getString(1));
+assertEquals(1,rs.getLong(2));
+assertEquals(2,rs.getLong(3));
+assertFalse(rs.next());
+
+conn.close();
+}
+
 
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
index 70e94ca..2aa1827 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
@@ -80,7 +80,7 @@ public class AndExpression extends AndOrExpression {
 
 @Override
 protected boolean isStopValue(Boolean value) {
-return !Boolean.TRUE.equals(value);
+return Boolean.FALSE.equals(value);
 }
 
 @Override
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
index ea8c375..07b07a2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.schema.types.

[phoenix] branch master updated: PHOENIX-5136 Rows with null values inserted by UPSERT .. ON DUPLICATE KEY UPDATE are included in query results when they shouldn't be(Miles Spielberg)-addendum

2019-08-26 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 43310a6  PHOENIX-5136 Rows with null values inserted by UPSERT .. ON 
DUPLICATE KEY UPDATE are included in query results when they shouldn't be(Miles 
Spielberg)-addendum
43310a6 is described below

commit 43310a6dc69433d3df772e17d48fe4d8821d40ac
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon Aug 26 21:59:02 2019 +0530

PHOENIX-5136 Rows with null values inserted by UPSERT .. ON DUPLICATE KEY 
UPDATE are included in query results when they shouldn't be(Miles 
Spielberg)-addendum
---
 .../phoenix/expression/AndExpressionTest.java  | 297 +
 .../phoenix/expression/OrExpressionTest.java   | 293 
 2 files changed, 590 insertions(+)

diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
new file mode 100644
index 000..a8f1529
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/expression/AndExpressionTest.java
@@ -0,0 +1,297 @@
+package org.apache.phoenix.expression;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PBaseColumn;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDataType;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class AndExpressionTest {
+
+private AndExpression createAnd(Expression lhs, Expression rhs) {
+return new AndExpression(Arrays.asList(lhs, rhs));
+}
+
+private AndExpression createAnd(Boolean x, Boolean y) {
+return createAnd(LiteralExpression.newConstant(x), 
LiteralExpression.newConstant(y));
+}
+
+private void testImmediateSingle(Boolean expected, Boolean lhs, Boolean 
rhs) {
+AndExpression and = createAnd(lhs, rhs);
+ImmutableBytesWritable out = new ImmutableBytesWritable();
+MultiKeyValueTuple tuple = new MultiKeyValueTuple();
+boolean success = and.evaluate(tuple, out);
+assertTrue(success);
+assertEquals(expected, PBoolean.INSTANCE.toObject(out));
+}
+
+// Evaluating AND when values of both sides are known should immediately 
succeed
+// and return the same result regardless of order.
+private void testImmediate(Boolean expected, Boolean a, Boolean b) {
+testImmediateSingle(expected, a, b);
+testImmediateSingle(expected, b, a);
+}
+
+private PColumn pcolumn(final String name) {
+return new PBaseColumn() {
+@Override public PName getName() {
+return PNameFactory.newName(name);
+}
+
+@Override public PDataType getDataType() {
+return PBoolean.INSTANCE;
+}
+
+@Override public PName getFamilyName() {
+return 
PNameFactory.newName(QueryConstants.DEFAULT_COLUMN_FAMILY);
+}
+
+@Override public int getPosition() {
+return 0;
+}
+
+@Override public Integer getArraySize() {
+return null;
+}
+
+@Override public byte[] getViewConstant() {
+return new byte[0];
+}
+
+@Override public boolean isViewReferenced() {
+return false;
+}
+
+@Override public String getExpressionStr() {
+return null;
+}
+
+@Override public boolean isRowTimestamp() {
+return false;
+}
+
+@Override public boolean isDynamic() {
+return false;
+}
+
+@Override public byte[] getColumnQualifierBytes() {
+return null;
+}
+
+@Override public long getTimestamp() {
+return 0;
+}
+
+@Override public boolean isDerived() {
+return false;
+}
+
+@Override public boolean isExcluded() {
+return false;
+}
+
+@Override public SortOrder getSortOrder() {
+return null
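
The companion OrExpressionTest added in the same commit pins down the mirror-image rule: for OR, only TRUE can short-circuit, since NULL OR FALSE is NULL rather than FALSE. A plain-Java sketch of that rule (illustrative names, not Phoenix's API):

    // Illustrative sketch, not Phoenix code: three-valued OR over TRUE / FALSE / null (unknown).
    class ThreeValuedOrSketch {
        static Boolean or3(Boolean a, Boolean b) {
            if (Boolean.TRUE.equals(a) || Boolean.TRUE.equals(b)) return Boolean.TRUE; // TRUE dominates
            if (a == null || b == null) return null; // otherwise unknown if either side is unknown
            return Boolean.FALSE;
        }

        // Only TRUE may stop OR evaluation early; NULL must not.
        static boolean isStopValueForOr(Boolean value) {
            return Boolean.TRUE.equals(value);
        }
    }
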

[phoenix] branch master updated: PHOENIX-5136 Rows with null values inserted by UPSERT .. ON DUPLICATE KEY UPDATE are included in query results when they shouldn't be(Miles Spielberg)

2019-08-26 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new f4579a5  PHOENIX-5136 Rows with null values inserted by UPSERT .. ON 
DUPLICATE KEY UPDATE are included in query results when they shouldn't be(Miles 
Spielberg)
f4579a5 is described below

commit f4579a52a950a880368a2ec9f8cecbd3c67cbae9
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon Aug 26 21:53:06 2019 +0530

PHOENIX-5136 Rows with null values inserted by UPSERT .. ON DUPLICATE KEY 
UPDATE are included in query results when they shouldn't be(Miles Spielberg)
---
 .../apache/phoenix/end2end/OnDuplicateKeyIT.java   | 34 ++
 .../apache/phoenix/expression/AndExpression.java   |  2 +-
 .../apache/phoenix/expression/AndOrExpression.java | 25 
 .../phoenix/filter/BooleanExpressionFilter.java|  5 +++-
 4 files changed, 58 insertions(+), 8 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
index f1ee0e7..4782e57 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OnDuplicateKeyIT.java
@@ -580,6 +580,40 @@ public class OnDuplicateKeyIT extends 
ParallelStatsDisabledIT {
 }
 }
 
+@Test
+public void 
testRowsCreatedViaUpsertOnDuplicateKeyShouldNotBeReturnedInQueryIfNotMatched() 
throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+String tableName = generateUniqueName();
+String ddl = "create table " + tableName + "(pk varchar primary key, 
counter1 bigint, counter2 smallint)";
+conn.createStatement().execute(ddl);
+createIndex(conn, tableName);
+// The data has to be specifically starting with null for the first 
counter to fail the test. If you reverse the values, the test passes.
+String dml1 = "UPSERT INTO " + tableName + " VALUES('a',NULL,2) ON 
DUPLICATE KEY UPDATE " +
+"counter1 = CASE WHEN (counter1 IS NULL) THEN NULL ELSE 
counter1 END, " +
+"counter2 = CASE WHEN (counter1 IS NULL) THEN 2 ELSE counter2 
END";
+conn.createStatement().execute(dml1);
+conn.commit();
+
+String dml2 = "UPSERT INTO " + tableName + " VALUES('b',1,2) ON 
DUPLICATE KEY UPDATE " +
+"counter1 = CASE WHEN (counter1 IS NULL) THEN 1 ELSE counter1 
END, " +
+"counter2 = CASE WHEN (counter1 IS NULL) THEN 2 ELSE counter2 
END";
+conn.createStatement().execute(dml2);
+conn.commit();
+
+// Using this statement causes the test to pass
+//ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " 
+ tableName + " WHERE counter2 = 2 AND counter1 = 1");
+// This statement should be equivalent to the one above, but it 
selects both rows.
+ResultSet rs = conn.createStatement().executeQuery("SELECT pk, 
counter1, counter2 FROM " + tableName + " WHERE counter2 = 2 AND (counter1 = 1 
OR counter1 = 1)");
+assertTrue(rs.next());
+assertEquals("b",rs.getString(1));
+assertEquals(1,rs.getLong(2));
+assertEquals(2,rs.getLong(3));
+assertFalse(rs.next());
+
+conn.close();
+}
+
 
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
index 70e94ca..2aa1827 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
@@ -80,7 +80,7 @@ public class AndExpression extends AndOrExpression {
 
 @Override
 protected boolean isStopValue(Boolean value) {
-return !Boolean.TRUE.equals(value);
+return Boolean.FALSE.equals(value);
 }
 
 @Override
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
index ea8c375..07b07a2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.tuple.Tuple;
-
+import org.apache.phoenix.util.ByteU

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5388 Incorrect current_date()/now() when query involves subquery(Ankit Singhal)

2019-07-19 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new e6fedb8  PHOENIX-5388 Incorrect current_date()/now() when query 
involves subquery(Ankit Singhal)
e6fedb8 is described below

commit e6fedb855effd0b10581152157417e7109145ed8
Author: Rajeshbabu Chintaguntla 
AuthorDate: Fri Jul 19 12:38:18 2019 +0530

PHOENIX-5388 Incorrect current_date()/now() when query involves 
subquery(Ankit Singhal)
---
 .../src/it/java/org/apache/phoenix/end2end/DateTimeIT.java  | 13 +
 .../java/org/apache/phoenix/compile/StatementContext.java   |  7 ---
 .../src/main/java/org/apache/phoenix/schema/TableRef.java   |  2 +-
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
index 561c96c..37b4a7c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -441,6 +441,19 @@ public class DateTimeIT extends ParallelStatsDisabledIT {
 }
 
 @Test
+public void testNowWithSubquery() throws Exception {
+String query =
+"SELECT now(), reference_date FROM (select now() as "
++ "reference_date union all select now() as 
reference_date) limit 1";
+Statement statement = conn.createStatement();
+ResultSet rs = statement.executeQuery(query);
+assertTrue(rs.next());
+
assertTrue(Math.abs(rs.getTime(1).getTime()-rs.getTime(2).getTime())<1);
+assertEquals(rs.getDate(2).toString(), rs.getDate(1).toString());
+assertFalse(rs.next());
+}
+
+@Test
 public void testSelectLiteralDate() throws Exception {
 String s = DateUtil.DEFAULT_DATE_FORMATTER.format(date);
 String query = "SELECT DATE '" + s + "' FROM " + this.tableName;
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
index cc38870..b477049 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
@@ -260,9 +260,10 @@ public class StatementContext {
 public long getCurrentTime() throws SQLException {
 long ts = this.getCurrentTable().getCurrentTime();
 // if the table is transactional then it is only resolved once per 
query, so we can't use the table timestamp
-if (this.getCurrentTable().getTable().getType() != 
PTableType.PROJECTED && !this
-.getCurrentTable().getTable().isTransactional() && ts != 
QueryConstants
-.UNSET_TIMESTAMP) {
+if (this.getCurrentTable().getTable().getType() != PTableType.SUBQUERY
+&& this.getCurrentTable().getTable().getType() != 
PTableType.PROJECTED
+&& !this.getCurrentTable().getTable().isTransactional()
+&& ts != QueryConstants.UNSET_TIMESTAMP) {
 return ts;
 }
 if (currentTime != QueryConstants.UNSET_TIMESTAMP) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
index b40c0b8..5f426b0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
@@ -80,9 +80,9 @@ public class TableRef {
 boolean hasDynamicCols) {
 this.alias = alias;
 this.table = table;
-this.currentTime = upperBoundTimeStamp;
 // if UPDATE_CACHE_FREQUENCY is set, always let the server set 
timestamps
 this.upperBoundTimeStamp = table.getUpdateCacheFrequency()!=0 ? 
QueryConstants.UNSET_TIMESTAMP : upperBoundTimeStamp;
+this.currentTime = this.upperBoundTimeStamp;
 this.lowerBoundTimeStamp = lowerBoundTimeStamp;
 this.hasDynamicCols = hasDynamicCols;
 }
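
Net effect of the two hunks: the current time is resolved once per statement and reused, and the table-level timestamp is only trusted for plain, non-transactional tables whose timestamp is actually set; subqueries, projected tables, transactional tables, and tables with UPDATE_CACHE_FREQUENCY all fall back to that single per-statement time, which is why now() and the subquery's reference_date agree in the test above. A hedged sketch of the decision, with illustrative names rather than Phoenix's API:

    // Illustrative sketch of the timestamp choice after the patch; not Phoenix code.
    class CurrentTimeSketch {
        enum Kind { TABLE, SUBQUERY, PROJECTED }
        static final long UNSET = -1L; // stand-in for QueryConstants.UNSET_TIMESTAMP

        static long currentTime(Kind kind, boolean transactional, long tableTs, long statementTs) {
            boolean trustTableTs = kind != Kind.SUBQUERY
                    && kind != Kind.PROJECTED
                    && !transactional
                    && tableTs != UNSET;
            // statementTs is resolved once per statement, so repeated now() calls agree
            return trustTableTs ? tableTs : statementTs;
        }
    }
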



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5388 Incorrect current_date()/now() when query involves subquery(Ankit Singhal)

2019-07-19 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new f336c76  PHOENIX-5388 Incorrect current_date()/now() when query 
involves subquery(Ankit Singhal)
f336c76 is described below

commit f336c766462af9d9acf0f35243a694a8b5678962
Author: Rajeshbabu Chintaguntla 
AuthorDate: Fri Jul 19 12:37:08 2019 +0530

PHOENIX-5388 Incorrect current_date()/now() when query involves 
subquery(Ankit Singhal)
---
 .../src/it/java/org/apache/phoenix/end2end/DateTimeIT.java  | 13 +
 .../java/org/apache/phoenix/compile/StatementContext.java   |  7 ---
 .../src/main/java/org/apache/phoenix/schema/TableRef.java   |  2 +-
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
index cc7c7a7..6e48d67 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -440,6 +440,19 @@ public class DateTimeIT extends ParallelStatsDisabledIT {
 }
 
 @Test
+public void testNowWithSubquery() throws Exception {
+String query =
+"SELECT now(), reference_date FROM (select now() as "
++ "reference_date union all select now() as 
reference_date) limit 1";
+Statement statement = conn.createStatement();
+ResultSet rs = statement.executeQuery(query);
+assertTrue(rs.next());
+
assertTrue(Math.abs(rs.getTime(1).getTime()-rs.getTime(2).getTime())<1);
+assertEquals(rs.getDate(2).toString(), rs.getDate(1).toString());
+assertFalse(rs.next());
+}
+
+@Test
 public void testSelectLiteralDate() throws Exception {
 String s = DateUtil.DEFAULT_DATE_FORMATTER.format(date);
 String query = "SELECT DATE '" + s + "' FROM " + this.tableName;
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
index cc38870..b477049 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
@@ -260,9 +260,10 @@ public class StatementContext {
 public long getCurrentTime() throws SQLException {
 long ts = this.getCurrentTable().getCurrentTime();
 // if the table is transactional then it is only resolved once per 
query, so we can't use the table timestamp
-if (this.getCurrentTable().getTable().getType() != 
PTableType.PROJECTED && !this
-.getCurrentTable().getTable().isTransactional() && ts != 
QueryConstants
-.UNSET_TIMESTAMP) {
+if (this.getCurrentTable().getTable().getType() != PTableType.SUBQUERY
+&& this.getCurrentTable().getTable().getType() != 
PTableType.PROJECTED
+&& !this.getCurrentTable().getTable().isTransactional()
+&& ts != QueryConstants.UNSET_TIMESTAMP) {
 return ts;
 }
 if (currentTime != QueryConstants.UNSET_TIMESTAMP) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
index b40c0b8..5f426b0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
@@ -80,9 +80,9 @@ public class TableRef {
 boolean hasDynamicCols) {
 this.alias = alias;
 this.table = table;
-this.currentTime = upperBoundTimeStamp;
 // if UPDATE_CACHE_FREQUENCY is set, always let the server set 
timestamps
 this.upperBoundTimeStamp = table.getUpdateCacheFrequency()!=0 ? 
QueryConstants.UNSET_TIMESTAMP : upperBoundTimeStamp;
+this.currentTime = this.upperBoundTimeStamp;
 this.lowerBoundTimeStamp = lowerBoundTimeStamp;
 this.hasDynamicCols = hasDynamicCols;
 }



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5388 Incorrect current_date()/now() when query involves subquery(Ankit Singhal)

2019-07-19 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 49e04d5  PHOENIX-5388 Incorrect current_date()/now() when query 
involves subquery(Ankit Singhal)
49e04d5 is described below

commit 49e04d5608b8648392ca7d79d0183ef9e9615033
Author: Rajeshbabu Chintaguntla 
AuthorDate: Fri Jul 19 12:25:28 2019 +0530

PHOENIX-5388 Incorrect current_date()/now() when query involves 
subquery(Ankit Singhal)
---
 .../src/it/java/org/apache/phoenix/end2end/DateTimeIT.java  | 13 +
 .../java/org/apache/phoenix/compile/StatementContext.java   |  7 ---
 .../src/main/java/org/apache/phoenix/schema/TableRef.java   |  2 +-
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
index cc7c7a7..6e48d67 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -440,6 +440,19 @@ public class DateTimeIT extends ParallelStatsDisabledIT {
 }
 
 @Test
+public void testNowWithSubquery() throws Exception {
+String query =
+"SELECT now(), reference_date FROM (select now() as "
++ "reference_date union all select now() as 
reference_date) limit 1";
+Statement statement = conn.createStatement();
+ResultSet rs = statement.executeQuery(query);
+assertTrue(rs.next());
+
assertTrue(Math.abs(rs.getTime(1).getTime()-rs.getTime(2).getTime())<1);
+assertEquals(rs.getDate(2).toString(), rs.getDate(1).toString());
+assertFalse(rs.next());
+}
+
+@Test
 public void testSelectLiteralDate() throws Exception {
 String s = DateUtil.DEFAULT_DATE_FORMATTER.format(date);
 String query = "SELECT DATE '" + s + "' FROM " + this.tableName;
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
index cc38870..b477049 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
@@ -260,9 +260,10 @@ public class StatementContext {
 public long getCurrentTime() throws SQLException {
 long ts = this.getCurrentTable().getCurrentTime();
 // if the table is transactional then it is only resolved once per 
query, so we can't use the table timestamp
-if (this.getCurrentTable().getTable().getType() != 
PTableType.PROJECTED && !this
-.getCurrentTable().getTable().isTransactional() && ts != 
QueryConstants
-.UNSET_TIMESTAMP) {
+if (this.getCurrentTable().getTable().getType() != PTableType.SUBQUERY
+&& this.getCurrentTable().getTable().getType() != 
PTableType.PROJECTED
+&& !this.getCurrentTable().getTable().isTransactional()
+&& ts != QueryConstants.UNSET_TIMESTAMP) {
 return ts;
 }
 if (currentTime != QueryConstants.UNSET_TIMESTAMP) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
index b40c0b8..5f426b0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
@@ -80,9 +80,9 @@ public class TableRef {
 boolean hasDynamicCols) {
 this.alias = alias;
 this.table = table;
-this.currentTime = upperBoundTimeStamp;
 // if UPDATE_CACHE_FREQUENCY is set, always let the server set 
timestamps
 this.upperBoundTimeStamp = table.getUpdateCacheFrequency()!=0 ? 
QueryConstants.UNSET_TIMESTAMP : upperBoundTimeStamp;
+this.currentTime = this.upperBoundTimeStamp;
 this.lowerBoundTimeStamp = lowerBoundTimeStamp;
 this.hasDynamicCols = hasDynamicCols;
 }



[phoenix] branch master updated: PHOENIX-5388 Incorrect current_date()/now() when query involves subquery(Ankit Singhal)

2019-07-19 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 86f158b  PHOENIX-5388 Incorrect current_date()/now() when query 
involves subquery(Ankit Singhal)
86f158b is described below

commit 86f158bd613c8e0df927cd35d8442863886983e5
Author: Rajeshbabu Chintaguntla 
AuthorDate: Fri Jul 19 12:24:11 2019 +0530

PHOENIX-5388 Incorrect current_date()/now() when query involves 
subquery(Ankit Singhal)
---
 .../src/it/java/org/apache/phoenix/end2end/DateTimeIT.java  | 13 +
 .../java/org/apache/phoenix/compile/StatementContext.java   |  7 ---
 .../src/main/java/org/apache/phoenix/schema/TableRef.java   |  2 +-
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
index df94a70..c76481e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -441,6 +441,19 @@ public class DateTimeIT extends ParallelStatsDisabledIT {
 }
 
 @Test
+public void testNowWithSubquery() throws Exception {
+String query =
+"SELECT now(), reference_date FROM (select now() as "
++ "reference_date union all select now() as 
reference_date) limit 1";
+Statement statement = conn.createStatement();
+ResultSet rs = statement.executeQuery(query);
+assertTrue(rs.next());
+
assertTrue(Math.abs(rs.getTime(1).getTime()-rs.getTime(2).getTime())<1);
+assertEquals(rs.getDate(2).toString(), rs.getDate(1).toString());
+assertFalse(rs.next());
+}
+
+@Test
 public void testSelectLiteralDate() throws Exception {
 String s = DateUtil.DEFAULT_DATE_FORMATTER.format(date);
 String query = "SELECT DATE '" + s + "' FROM " + this.tableName;
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
index cc38870..b477049 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
@@ -260,9 +260,10 @@ public class StatementContext {
 public long getCurrentTime() throws SQLException {
 long ts = this.getCurrentTable().getCurrentTime();
 // if the table is transactional then it is only resolved once per 
query, so we can't use the table timestamp
-if (this.getCurrentTable().getTable().getType() != 
PTableType.PROJECTED && !this
-.getCurrentTable().getTable().isTransactional() && ts != 
QueryConstants
-.UNSET_TIMESTAMP) {
+if (this.getCurrentTable().getTable().getType() != PTableType.SUBQUERY
+&& this.getCurrentTable().getTable().getType() != 
PTableType.PROJECTED
+&& !this.getCurrentTable().getTable().isTransactional()
+&& ts != QueryConstants.UNSET_TIMESTAMP) {
 return ts;
 }
 if (currentTime != QueryConstants.UNSET_TIMESTAMP) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
index bbbfc5e..bfaa066 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/TableRef.java
@@ -80,9 +80,9 @@ public class TableRef {
 boolean hasDynamicCols) {
 this.alias = alias;
 this.table = table;
-this.currentTime = upperBoundTimeStamp;
 // if UPDATE_CACHE_FREQUENCY is set, always let the server set 
timestamps
 this.upperBoundTimeStamp = table.getUpdateCacheFrequency()!=0 ? 
QueryConstants.UNSET_TIMESTAMP : upperBoundTimeStamp;
+this.currentTime = this.upperBoundTimeStamp;
 this.lowerBoundTimeStamp = lowerBoundTimeStamp;
 this.hasDynamicCols = hasDynamicCols;
 }



[phoenix-connectors] branch master updated: PHOENIX-5309 Skip adding log4j and slf4j to phoenix-hive jar to avoid logging in hive-server2.err file than hiveserver2.log(Rajeshbabu)

2019-06-23 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix-connectors.git


The following commit(s) were added to refs/heads/master by this push:
 new 9890de6  PHOENIX-5309 Skip adding log4j and slf4j to phoenix-hive jar 
to avoid logging in hive-server2.err file than hiveserver2.log(Rajeshbabu)
9890de6 is described below

commit 9890de6366f3f32af48df0122d49f94a878de146
Author: Rajeshbabu Chintaguntla 
AuthorDate: Sun Jun 23 22:41:03 2019 +0530

PHOENIX-5309 Skip adding log4j and slf4j to phoenix-hive jar to avoid 
logging in hive-server2.err file than hiveserver2.log(Rajeshbabu)
---
 phoenix-hive/pom.xml | 22 ++
 1 file changed, 22 insertions(+)

diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 705ad34..8a95b44 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -36,6 +36,16 @@
     <dependency>
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-core</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-api</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
@@ -55,6 +65,18 @@
       <scope>provided</scope>
     </dependency>
     <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>${slf4j.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <version>${log4j.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-core</artifactId>
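
With the exclusions and provided-scope entries above, log4j and slf4j classes should no longer be packaged into the phoenix-hive artifact, so logging flows through HiveServer2's own configuration instead of hive-server2.err. A quick, hedged way to check a built jar (the path argument is illustrative):

    import java.util.jar.JarFile;

    public class CheckPhoenixHiveJar {
        public static void main(String[] args) throws Exception {
            // Pass the path of the phoenix-hive jar produced by the build as the first argument.
            try (JarFile jar = new JarFile(args[0])) {
                long bundledLoggerClasses = jar.stream()
                        .filter(e -> e.getName().startsWith("org/apache/log4j/")
                                  || e.getName().startsWith("org/slf4j/"))
                        .count();
                System.out.println(bundledLoggerClasses == 0
                        ? "OK: no bundled log4j/slf4j classes"
                        : bundledLoggerClasses + " logger classes still bundled");
            }
        }
    }
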
 



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5308 Unable to run the some end2end tests in real cluster mainly the once using accessing hbase internals from minihbasecluster or custom coprocessors(R

2019-06-17 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 301ba7f  PHOENIX-5308 Unable to run the some end2end tests in real 
cluster mainly the once using accessing hbase internals from minihbasecluster 
or custom coprocessors(Rajeshbabu)
301ba7f is described below

commit 301ba7fe7d36b2a2b6364c1e1255dc301593af1d
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon Jun 17 16:16:12 2019 +0530

PHOENIX-5308 Unable to run the some end2end tests in real cluster mainly 
the once using accessing hbase internals from minihbasecluster or custom 
coprocessors(Rajeshbabu)
---
 .../end2end/ConcurrentMutationsExtendedIT.java | 404 +
 .../phoenix/end2end/ConcurrentMutationsIT.java | 343 +
 .../end2end/index/MutableIndexExtendedIT.java  | 184 ++
 .../phoenix/end2end/index/MutableIndexIT.java  | 176 -
 .../phoenix/end2end/join/HashJoinCacheIT.java  |   3 +
 5 files changed, 592 insertions(+), 518 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
new file mode 100644
index 000..571961d
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
@@ -0,0 +1,404 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.util.*;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.sql.*;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.*;
+
+@RunWith(RunUntilFailure.class) @Category(NeedsOwnMiniClusterTest.class)
+public class ConcurrentMutationsExtendedIT extends ParallelStatsDisabledIT {
+
+private static final Random RAND = new Random(5);
+private static final String MVCC_LOCK_TEST_TABLE_PREFIX = "MVCCLOCKTEST_";
+private static final String LOCK_TEST_TABLE_PREFIX = "LOCKTEST_";
+private static final int ROW_LOCK_WAIT_TIME = 1;
+
+private final Object lock = new Object();
+
+@Test
+public void testSynchronousDeletesAndUpsertValues() throws Exception {
+final String tableName = generateUniqueName();
+final String indexName = generateUniqueName();
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute("CREATE TABLE " + tableName
++ "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 INTEGER, 
CONSTRAINT pk PRIMARY KEY (k1,k2)) COLUMN_ENCODED_BYTES = 0");
+TestUtil.addCoprocessor(conn, tableName, DelayingRegionObserver.class);
+conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + 
tableName + "(v1)");
+final CountDownLatch doneSignal = new CountDownLatch(2);
+Runnable r1 = new Runnable() {
+
+@Override public void run() {
+try {
+Properties props = 
PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+for (int i = 0; i < 50; i++) {
+Thread.sleep(20);
+synchronized (lock) {
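
The concurrent-mutation tests above drive two writer threads that take turns under a shared monitor and signal completion through a CountDownLatch. A Phoenix-free sketch of that coordination pattern (class name, loop bounds, and timeouts are illustrative):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    public class TwoWriterHarnessSketch {
        private static final Object LOCK = new Object();

        public static void main(String[] args) throws InterruptedException {
            final CountDownLatch doneSignal = new CountDownLatch(2);
            Runnable writer = () -> {
                try {
                    for (int i = 0; i < 50; i++) {
                        Thread.sleep(20);
                        synchronized (LOCK) {
                            // a real test issues UPSERT/DELETE statements over JDBC here
                        }
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    doneSignal.countDown(); // always count down so await() below cannot hang
                }
            };
            new Thread(writer, "writer-1").start();
            new Thread(writer, "writer-2").start();
            // the test asserts the writers finish within a bounded time
            System.out.println(doneSignal.await(60, TimeUnit.SECONDS) ? "writers finished" : "timed out");
        }
    }
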

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5308 Unable to run the some end2end tests in real cluster mainly the once using accessing hbase internals from minihbasecluster or custom coprocessors(R

2019-06-17 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 78eb884  PHOENIX-5308 Unable to run the some end2end tests in real 
cluster mainly the once using accessing hbase internals from minihbasecluster 
or custom coprocessors(Rajeshbabu)
78eb884 is described below

commit 78eb8841b7a5d28e2591dc89adef89f692e98220
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon Jun 17 16:15:07 2019 +0530

PHOENIX-5308 Unable to run the some end2end tests in real cluster mainly 
the once using accessing hbase internals from minihbasecluster or custom 
coprocessors(Rajeshbabu)
---
 .../end2end/ConcurrentMutationsExtendedIT.java | 404 +
 .../phoenix/end2end/ConcurrentMutationsIT.java | 343 +
 .../end2end/index/MutableIndexExtendedIT.java  | 184 ++
 .../phoenix/end2end/index/MutableIndexIT.java  | 175 -
 .../phoenix/end2end/join/HashJoinCacheIT.java  |   3 +
 5 files changed, 592 insertions(+), 517 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
new file mode 100644
index 000..571961d
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
@@ -0,0 +1,404 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.util.*;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.sql.*;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.*;
+
+@RunWith(RunUntilFailure.class) @Category(NeedsOwnMiniClusterTest.class)
+public class ConcurrentMutationsExtendedIT extends ParallelStatsDisabledIT {
+
+private static final Random RAND = new Random(5);
+private static final String MVCC_LOCK_TEST_TABLE_PREFIX = "MVCCLOCKTEST_";
+private static final String LOCK_TEST_TABLE_PREFIX = "LOCKTEST_";
+private static final int ROW_LOCK_WAIT_TIME = 1;
+
+private final Object lock = new Object();
+
+@Test
+public void testSynchronousDeletesAndUpsertValues() throws Exception {
+final String tableName = generateUniqueName();
+final String indexName = generateUniqueName();
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute("CREATE TABLE " + tableName
++ "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 INTEGER, 
CONSTRAINT pk PRIMARY KEY (k1,k2)) COLUMN_ENCODED_BYTES = 0");
+TestUtil.addCoprocessor(conn, tableName, DelayingRegionObserver.class);
+conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + 
tableName + "(v1)");
+final CountDownLatch doneSignal = new CountDownLatch(2);
+Runnable r1 = new Runnable() {
+
+@Override public void run() {
+try {
+Properties props = 
PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+for (int i = 0; i < 50; i++) {
+Thread.sleep(20);
+synchronized (lock) {

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5308 Unable to run the some end2end tests in real cluster mainly the once using accessing hbase internals from minihbasecluster or custom coprocessors(R

2019-06-17 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 0c15dcc  PHOENIX-5308 Unable to run the some end2end tests in real 
cluster mainly the once using accessing hbase internals from minihbasecluster 
or custom coprocessors(Rajeshbabu)
0c15dcc is described below

commit 0c15dccb73857d8197af317bccba6d283124b8f5
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon Jun 17 15:41:08 2019 +0530

PHOENIX-5308 Unable to run the some end2end tests in real cluster mainly 
the once using accessing hbase internals from minihbasecluster or custom 
coprocessors(Rajeshbabu)
---
 .../end2end/ConcurrentMutationsExtendedIT.java | 404 +
 .../phoenix/end2end/ConcurrentMutationsIT.java | 343 +
 .../end2end/index/MutableIndexExtendedIT.java  | 184 ++
 .../phoenix/end2end/index/MutableIndexIT.java  | 176 -
 .../phoenix/end2end/join/HashJoinCacheIT.java  |   3 +
 5 files changed, 592 insertions(+), 518 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
new file mode 100644
index 000..571961d
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
@@ -0,0 +1,404 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.util.*;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.sql.*;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.*;
+
+@RunWith(RunUntilFailure.class) @Category(NeedsOwnMiniClusterTest.class)
+public class ConcurrentMutationsExtendedIT extends ParallelStatsDisabledIT {
+
+private static final Random RAND = new Random(5);
+private static final String MVCC_LOCK_TEST_TABLE_PREFIX = "MVCCLOCKTEST_";
+private static final String LOCK_TEST_TABLE_PREFIX = "LOCKTEST_";
+private static final int ROW_LOCK_WAIT_TIME = 1;
+
+private final Object lock = new Object();
+
+@Test
+public void testSynchronousDeletesAndUpsertValues() throws Exception {
+final String tableName = generateUniqueName();
+final String indexName = generateUniqueName();
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute("CREATE TABLE " + tableName
++ "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 INTEGER, 
CONSTRAINT pk PRIMARY KEY (k1,k2)) COLUMN_ENCODED_BYTES = 0");
+TestUtil.addCoprocessor(conn, tableName, DelayingRegionObserver.class);
+conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + 
tableName + "(v1)");
+final CountDownLatch doneSignal = new CountDownLatch(2);
+Runnable r1 = new Runnable() {
+
+@Override public void run() {
+try {
+Properties props = 
PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+for (int i = 0; i < 50; i++) {
+Thread.sleep(20);
+synchronized (lock) {

[phoenix] branch master updated: PHOENIX-5308 Unable to run the some end2end tests in real cluster mainly the once using accessing hbase internals from minihbasecluster or custom coprocessors(Rajeshba

2019-06-17 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 85ba3e9  PHOENIX-5308 Unable to run the some end2end tests in real 
cluster mainly the once using accessing hbase internals from minihbasecluster 
or custom coprocessors(Rajeshbabu)
85ba3e9 is described below

commit 85ba3e973f45f62ddfb67d484415d31860799200
Author: Rajeshbabu Chintaguntla 
AuthorDate: Mon Jun 17 12:26:34 2019 +0530

PHOENIX-5308 Unable to run the some end2end tests in real cluster mainly 
the once using accessing hbase internals from minihbasecluster or custom 
coprocessors(Rajeshbabu)
---
 .../end2end/ConcurrentMutationsExtendedIT.java | 404 +
 .../phoenix/end2end/ConcurrentMutationsIT.java | 343 +
 .../end2end/index/MutableIndexExtendedIT.java  | 279 ++
 .../phoenix/end2end/index/MutableIndexIT.java  | 176 -
 .../phoenix/end2end/join/HashJoinCacheIT.java  |   3 +
 5 files changed, 687 insertions(+), 518 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
new file mode 100644
index 000..571961d
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConcurrentMutationsExtendedIT.java
@@ -0,0 +1,404 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.util.*;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.sql.*;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.*;
+
+@RunWith(RunUntilFailure.class) @Category(NeedsOwnMiniClusterTest.class)
+public class ConcurrentMutationsExtendedIT extends ParallelStatsDisabledIT {
+
+private static final Random RAND = new Random(5);
+private static final String MVCC_LOCK_TEST_TABLE_PREFIX = "MVCCLOCKTEST_";
+private static final String LOCK_TEST_TABLE_PREFIX = "LOCKTEST_";
+private static final int ROW_LOCK_WAIT_TIME = 1;
+
+private final Object lock = new Object();
+
+@Test
+public void testSynchronousDeletesAndUpsertValues() throws Exception {
+final String tableName = generateUniqueName();
+final String indexName = generateUniqueName();
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute("CREATE TABLE " + tableName
++ "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 INTEGER, 
CONSTRAINT pk PRIMARY KEY (k1,k2)) COLUMN_ENCODED_BYTES = 0");
+TestUtil.addCoprocessor(conn, tableName, DelayingRegionObserver.class);
+conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + 
tableName + "(v1)");
+final CountDownLatch doneSignal = new CountDownLatch(2);
+Runnable r1 = new Runnable() {
+
+@Override public void run() {
+try {
+Properties props = 
PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+for (int i = 0; i < 50; i++) {
+Thread.sleep(20);
+synchronized (lock) {
+PhoenixCon

phoenix git commit: PHOENIX-4874 psql doesn't support date/time with values smaller than milliseconds-addendum(Rajeshbabu)

2018-10-16 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/master 02259651e -> c3c51180b


PHOENIX-4874 psql doesn't support date/time with values smaller than 
milliseconds-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c3c51180
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c3c51180
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c3c51180

Branch: refs/heads/master
Commit: c3c51180bf6c5913fcfb4b1e8dca2ebd60187d6e
Parents: 0225965
Author: Rajeshbabu Chintaguntla 
Authored: Tue Oct 16 21:23:21 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Oct 16 21:23:21 2018 +0530

--
 .../org/apache/phoenix/util/AbstractUpsertExecutorTest.java   | 7 ---
 1 file changed, 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c3c51180/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java
index aacbd8a..acae751 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java
@@ -25,21 +25,15 @@ import static org.mockito.Mockito.verifyNoMoreInteractions;
 
 import java.io.IOException;
 import java.sql.Connection;
-import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.sql.Timestamp;
 import java.sql.Types;
-import java.time.LocalDateTime;
-import java.time.LocalTime;
-import java.time.ZoneId;
-import java.time.format.DateTimeFormatterBuilder;
 import java.util.Arrays;
 import java.util.Base64;
 import java.util.List;
 import java.util.Properties;
-import java.util.TimeZone;
 
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
@@ -48,7 +42,6 @@ import org.apache.phoenix.schema.types.PArrayDataType;
 import org.apache.phoenix.schema.types.PBinary;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PIntegerArray;
-import org.apache.phoenix.schema.types.PTimestamp;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;



phoenix git commit: PHOENIX-4859 Using local index in where statement for join (only rhs table) query fails-addendum(Rajeshbabu)

2018-10-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/master 483979104 -> 0d32f8700


PHOENIX-4859 Using local index in where statement for join (only rhs table) 
query fails-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0d32f870
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0d32f870
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0d32f870

Branch: refs/heads/master
Commit: 0d32f870044c5fefdcf9a08e59e6c3c2ef3dc5d1
Parents: 4839791
Author: Rajeshbabu Chintaguntla 
Authored: Mon Oct 15 21:22:10 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Mon Oct 15 21:22:10 2018 +0530

--
 .../java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0d32f870/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
index 835cb6a..de0fe4c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
@@ -41,7 +41,7 @@ public class LocalIndexDataColumnRef extends ColumnRef {
 TableName.create(tRef.getTable().getSchemaName().getString(), 
tRef.getTable()
 .getParentTableName().getString())), 
context.getConnection(), false)
 
.resolveTable(context.getCurrentTable().getTable().getSchemaName().getString(),
-
context.getCurrentTable().getTable().getParentTableName().getString()),
+tRef.getTable().getParentTableName().getString()),
 IndexUtil.getDataColumnFamilyName(indexColumnName), IndexUtil
 .getDataColumnName(indexColumnName));
 position = context.getDataColumnPosition(this.getColumn());



phoenix git commit: PHOENIX-4859 Using local index in where statement for join (only rhs table) query fails(Rajeshbabu)

2018-10-09 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 e692f1494 -> 1e5c4cdbd


PHOENIX-4859 Using local index in where statement for join (only rhs table) 
query fails(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1e5c4cdb
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1e5c4cdb
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1e5c4cdb

Branch: refs/heads/4.x-HBase-1.4
Commit: 1e5c4cdbd92b000d046657bb424b524fde4873a5
Parents: e692f14
Author: Rajeshbabu Chintaguntla 
Authored: Tue Oct 9 16:05:19 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Oct 9 16:05:19 2018 +0530

--
 .../phoenix/end2end/index/LocalIndexIT.java | 29 
 .../phoenix/compile/ExpressionCompiler.java |  2 +-
 .../apache/phoenix/compile/JoinCompiler.java|  2 +-
 .../phoenix/compile/ProjectionCompiler.java |  4 +--
 .../compile/TupleProjectionCompiler.java|  2 +-
 .../phoenix/schema/LocalIndexDataColumnRef.java | 18 ++--
 6 files changed, 44 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1e5c4cdb/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index ed1cf45..e260969 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -684,6 +684,35 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 conn1.close();
 }
 
+@Test
+public void testLocalIndexSelfJoin() throws Exception {
+  String tableName = generateUniqueName();
+  String indexName = "IDX_" + generateUniqueName();
+  Connection conn1 = DriverManager.getConnection(getUrl());
+  if (isNamespaceMapped) {
+  conn1.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
+  }
+String ddl =
+"CREATE TABLE "
++ tableName
++ " (customer_id integer primary key, postal_code 
varchar, country_code varchar)";
+conn1.createStatement().execute(ddl);
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values(1,'560103','IN')");
+conn1.commit();
+conn1.createStatement().execute(
+"CREATE LOCAL INDEX " + indexName + " ON " + tableName + 
"(postal_code)");
+ResultSet rs =
+conn1.createStatement()
+.executeQuery(
+"SELECT * from "
++ tableName
++ " c1, "
++ tableName
++ " c2 where c1.customer_id=c2.customer_id 
and c2.postal_code='560103'");
+assertTrue(rs.next());
+conn1.close();
+}
+
 private void copyLocalIndexHFiles(Configuration conf, HRegionInfo 
fromRegion, HRegionInfo toRegion, boolean move)
 throws IOException {
 Path root = FSUtils.getRootDir(conf);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1e5c4cdb/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 9daa744..077e1af 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -376,7 +376,7 @@ public class ExpressionCompiler extends 
UnsupportedAllParseNodeVisitorhttp://git-wip-us.apache.org/repos/asf/phoenix/blob/1e5c4cdb/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 36bfc5f..880fa72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -869,7 +869,7 @@ public class JoinCompiler {
 if (columnRef.getTableRef().equals(tableRef)
   

phoenix git commit: PHOENIX-4859 Using local index in where statement for join (only rhs table) query fails(Rajeshbabu)

2018-10-09 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 c90d090a1 -> 2ded8b64c


PHOENIX-4859 Using local index in where statement for join (only rhs table) 
query fails(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2ded8b64
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2ded8b64
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2ded8b64

Branch: refs/heads/4.x-HBase-1.3
Commit: 2ded8b64cdea48e89e7a0c936a59913a00345416
Parents: c90d090
Author: Rajeshbabu Chintaguntla 
Authored: Tue Oct 9 16:04:41 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Oct 9 16:04:41 2018 +0530

--
 .../phoenix/end2end/index/LocalIndexIT.java | 29 
 .../phoenix/compile/ExpressionCompiler.java |  2 +-
 .../apache/phoenix/compile/JoinCompiler.java|  2 +-
 .../phoenix/compile/ProjectionCompiler.java |  4 +--
 .../compile/TupleProjectionCompiler.java|  2 +-
 .../phoenix/schema/LocalIndexDataColumnRef.java | 18 ++--
 6 files changed, 44 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2ded8b64/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index ed1cf45..e260969 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -684,6 +684,35 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 conn1.close();
 }
 
+@Test
+public void testLocalIndexSelfJoin() throws Exception {
+  String tableName = generateUniqueName();
+  String indexName = "IDX_" + generateUniqueName();
+  Connection conn1 = DriverManager.getConnection(getUrl());
+  if (isNamespaceMapped) {
+  conn1.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
+  }
+String ddl =
+"CREATE TABLE "
++ tableName
++ " (customer_id integer primary key, postal_code 
varchar, country_code varchar)";
+conn1.createStatement().execute(ddl);
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values(1,'560103','IN')");
+conn1.commit();
+conn1.createStatement().execute(
+"CREATE LOCAL INDEX " + indexName + " ON " + tableName + 
"(postal_code)");
+ResultSet rs =
+conn1.createStatement()
+.executeQuery(
+"SELECT * from "
++ tableName
++ " c1, "
++ tableName
++ " c2 where c1.customer_id=c2.customer_id 
and c2.postal_code='560103'");
+assertTrue(rs.next());
+conn1.close();
+}
+
 private void copyLocalIndexHFiles(Configuration conf, HRegionInfo 
fromRegion, HRegionInfo toRegion, boolean move)
 throws IOException {
 Path root = FSUtils.getRootDir(conf);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2ded8b64/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 9daa744..077e1af 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -376,7 +376,7 @@ public class ExpressionCompiler extends 
UnsupportedAllParseNodeVisitorhttp://git-wip-us.apache.org/repos/asf/phoenix/blob/2ded8b64/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 36bfc5f..880fa72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -869,7 +869,7 @@ public class JoinCompiler {
 if (columnRef.getTableRef().equals(tableRef)
   

phoenix git commit: PHOENIX-4859 Using local index in where statement for join (only rhs table) query fails(Rajeshbabu)

2018-10-09 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 d4d5887b9 -> 52bcff6f4


PHOENIX-4859 Using local index in where statement for join (only rhs table) 
query fails(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/52bcff6f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/52bcff6f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/52bcff6f

Branch: refs/heads/4.x-HBase-1.2
Commit: 52bcff6f421ef26779a8d89186eff54eada063ee
Parents: d4d5887
Author: Rajeshbabu Chintaguntla 
Authored: Tue Oct 9 16:00:32 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Oct 9 16:00:32 2018 +0530

--
 .../phoenix/end2end/index/LocalIndexIT.java | 29 
 .../phoenix/compile/ExpressionCompiler.java |  2 +-
 .../apache/phoenix/compile/JoinCompiler.java|  2 +-
 .../phoenix/compile/ProjectionCompiler.java |  4 +--
 .../compile/TupleProjectionCompiler.java|  2 +-
 .../phoenix/schema/LocalIndexDataColumnRef.java | 18 ++--
 6 files changed, 44 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/52bcff6f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index ed1cf45..e260969 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -684,6 +684,35 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 conn1.close();
 }
 
+@Test
+public void testLocalIndexSelfJoin() throws Exception {
+  String tableName = generateUniqueName();
+  String indexName = "IDX_" + generateUniqueName();
+  Connection conn1 = DriverManager.getConnection(getUrl());
+  if (isNamespaceMapped) {
+  conn1.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
+  }
+String ddl =
+"CREATE TABLE "
++ tableName
++ " (customer_id integer primary key, postal_code 
varchar, country_code varchar)";
+conn1.createStatement().execute(ddl);
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values(1,'560103','IN')");
+conn1.commit();
+conn1.createStatement().execute(
+"CREATE LOCAL INDEX " + indexName + " ON " + tableName + 
"(postal_code)");
+ResultSet rs =
+conn1.createStatement()
+.executeQuery(
+"SELECT * from "
++ tableName
++ " c1, "
++ tableName
++ " c2 where c1.customer_id=c2.customer_id 
and c2.postal_code='560103'");
+assertTrue(rs.next());
+conn1.close();
+}
+
 private void copyLocalIndexHFiles(Configuration conf, HRegionInfo 
fromRegion, HRegionInfo toRegion, boolean move)
 throws IOException {
 Path root = FSUtils.getRootDir(conf);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/52bcff6f/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 9daa744..077e1af 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -376,7 +376,7 @@ public class ExpressionCompiler extends 
UnsupportedAllParseNodeVisitorhttp://git-wip-us.apache.org/repos/asf/phoenix/blob/52bcff6f/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 36bfc5f..880fa72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -869,7 +869,7 @@ public class JoinCompiler {
 if (columnRef.getTableRef().equals(tableRef)
   

phoenix git commit: PHOENIX-4859 Using local index in where statement for join (only rhs table) query fails(Rajeshbabu)

2018-10-09 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/master f7b053ac4 -> 9a818dd43


PHOENIX-4859 Using local index in where statement for join (only rhs table) 
query fails(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9a818dd4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9a818dd4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9a818dd4

Branch: refs/heads/master
Commit: 9a818dd43527008890d7b1cfa353725ff042f630
Parents: f7b053a
Author: Rajeshbabu Chintaguntla 
Authored: Tue Oct 9 12:36:42 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Oct 9 12:36:42 2018 +0530

--
 .../phoenix/end2end/index/LocalIndexIT.java | 29 
 .../phoenix/compile/ExpressionCompiler.java |  2 +-
 .../apache/phoenix/compile/JoinCompiler.java|  2 +-
 .../phoenix/compile/ProjectionCompiler.java |  4 +--
 .../compile/TupleProjectionCompiler.java|  2 +-
 .../phoenix/schema/LocalIndexDataColumnRef.java | 18 ++--
 6 files changed, 44 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9a818dd4/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 3ff35d1..bd4d675 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -692,6 +692,35 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 conn1.close();
 }
 
+@Test
+public void testLocalIndexSelfJoin() throws Exception {
+  String tableName = generateUniqueName();
+  String indexName = "IDX_" + generateUniqueName();
+  Connection conn1 = DriverManager.getConnection(getUrl());
+  if (isNamespaceMapped) {
+  conn1.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
+  }
+String ddl =
+"CREATE TABLE "
++ tableName
++ " (customer_id integer primary key, postal_code 
varchar, country_code varchar)";
+conn1.createStatement().execute(ddl);
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values(1,'560103','IN')");
+conn1.commit();
+conn1.createStatement().execute(
+"CREATE LOCAL INDEX " + indexName + " ON " + tableName + 
"(postal_code)");
+ResultSet rs =
+conn1.createStatement()
+.executeQuery(
+"SELECT * from "
++ tableName
++ " c1, "
++ tableName
++ " c2 where c1.customer_id=c2.customer_id 
and c2.postal_code='560103'");
+assertTrue(rs.next());
+conn1.close();
+}
+
 private void copyLocalIndexHFiles(Configuration conf, RegionInfo 
fromRegion, RegionInfo toRegion, boolean move)
 throws IOException {
 Path root = FSUtils.getRootDir(conf);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9a818dd4/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 9daa744..077e1af 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -376,7 +376,7 @@ public class ExpressionCompiler extends 
UnsupportedAllParseNodeVisitorhttp://git-wip-us.apache.org/repos/asf/phoenix/blob/9a818dd4/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 36bfc5f..880fa72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -869,7 +869,7 @@ public class JoinCompiler {
 if (columnRef.getTableRef().equals(tableRef)
 &

phoenix git commit: PHOENIX-4874 psql doesn't support date/time with values smaller than milliseconds(Rajeshbabu)

2018-09-23 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.4 b55678229 -> d003c324b


PHOENIX-4874 psql doesn't support date/time with values smaller than 
milliseconds(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d003c324
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d003c324
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d003c324

Branch: refs/heads/4.x-HBase-1.4
Commit: d003c324b7a9399b59209c4faafc3746525ec03c
Parents: b556782
Author: Rajeshbabu Chintaguntla 
Authored: Mon Sep 24 05:11:21 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Mon Sep 24 05:11:21 2018 +0530

--
 .../phoenix/util/csv/CsvUpsertExecutor.java | 20 +---
 .../phoenix/util/json/JsonUpsertExecutor.java   |  3 ++
 .../util/AbstractUpsertExecutorTest.java| 51 +++-
 3 files changed, 54 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d003c324/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index 4f98ada..0b5881f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.util.csv;
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
+import java.sql.Timestamp;
 import java.sql.Types;
 import java.util.Base64;
 import java.util.List;
@@ -30,6 +31,7 @@ import javax.annotation.Nullable;
 import org.apache.commons.csv.CSVRecord;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.expression.function.EncodeFormat;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.IllegalDataException;
@@ -41,6 +43,7 @@ import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.DateUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.UpsertExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -125,9 +128,9 @@ public class CsvUpsertExecutor extends 
UpsertExecutor {
 private final String binaryEncoding;
 
 SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) {
-Properties props;
+ReadOnlyProps props;
 try {
-props = conn.getClientInfo();
+props = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
 } catch (SQLException e) {
 throw new RuntimeException(e);
 }
@@ -139,23 +142,23 @@ public class CsvUpsertExecutor extends 
UpsertExecutor {
 String dateFormat;
 int dateSqlType = dataType.getResultSetSqlType();
 if (dateSqlType == Types.DATE) {
-dateFormat = 
props.getProperty(QueryServices.DATE_FORMAT_ATTRIB,
+dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB,
 DateUtil.DEFAULT_DATE_FORMAT);
 } else if (dateSqlType == Types.TIME) {
-dateFormat = 
props.getProperty(QueryServices.TIME_FORMAT_ATTRIB,
+dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB,
 DateUtil.DEFAULT_TIME_FORMAT);
 } else {
-dateFormat = 
props.getProperty(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
+dateFormat = 
props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
 DateUtil.DEFAULT_TIMESTAMP_FORMAT);

 }
-String timeZoneId = 
props.getProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
+String timeZoneId = 
props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
 QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
 this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, 
dataType, timeZoneId);
 } else {
 this.dateTimeParser = null;
 }
 this.codec = codec;
-this.binaryEncoding = 
props.getProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,
+this.binaryEncoding = 
props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCOD
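
The change above (applied identically to the other branches in the following commits) makes CsvUpsertExecutor read its date/time/timestamp formats and time zone from the Phoenix query services configuration instead of Connection.getClientInfo(), so formats configured cluster- or client-wide take effect for psql CSV loads. A minimal sketch, using only classes and constants that already appear in this diff, of resolving the same timestamp format a CSV load would use; the JDBC URL is illustrative.

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.util.DateUtil;
import org.apache.phoenix.util.ReadOnlyProps;

public class CsvTimestampFormatLookup {
    public static void main(String[] args) throws Exception {
        // Illustrative JDBC URL; point it at your own cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Same lookup path the patched CsvUpsertExecutor now uses: the query services
            // configuration, not Connection.getClientInfo().
            ReadOnlyProps props =
                    conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
            String timestampFormat = props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
                    DateUtil.DEFAULT_TIMESTAMP_FORMAT);
            String timeZoneId = props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
                    QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
            System.out.println("CSV timestamp values will be parsed with format '"
                    + timestampFormat + "' in time zone " + timeZoneId);
        }
    }
}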

phoenix git commit: PHOENIX-4874 psql doesn't support date/time with values smaller than milliseconds(Rajeshbabu)

2018-09-23 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 e5379cd1d -> 04931f1f8


PHOENIX-4874 psql doesn't support date/time with values smaller than 
milliseconds(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/04931f1f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/04931f1f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/04931f1f

Branch: refs/heads/4.x-HBase-1.3
Commit: 04931f1f8aa7017e535b1030e8f25317014f86be
Parents: e5379cd
Author: Rajeshbabu Chintaguntla 
Authored: Mon Sep 24 05:10:37 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Mon Sep 24 05:10:37 2018 +0530

--
 .../phoenix/util/csv/CsvUpsertExecutor.java | 20 +---
 .../phoenix/util/json/JsonUpsertExecutor.java   |  3 ++
 .../util/AbstractUpsertExecutorTest.java| 51 +++-
 3 files changed, 54 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/04931f1f/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index 4f98ada..0b5881f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.util.csv;
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
+import java.sql.Timestamp;
 import java.sql.Types;
 import java.util.Base64;
 import java.util.List;
@@ -30,6 +31,7 @@ import javax.annotation.Nullable;
 import org.apache.commons.csv.CSVRecord;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.expression.function.EncodeFormat;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.IllegalDataException;
@@ -41,6 +43,7 @@ import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.DateUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.UpsertExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -125,9 +128,9 @@ public class CsvUpsertExecutor extends 
UpsertExecutor {
 private final String binaryEncoding;
 
 SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) {
-Properties props;
+ReadOnlyProps props;
 try {
-props = conn.getClientInfo();
+props = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
 } catch (SQLException e) {
 throw new RuntimeException(e);
 }
@@ -139,23 +142,23 @@ public class CsvUpsertExecutor extends 
UpsertExecutor {
 String dateFormat;
 int dateSqlType = dataType.getResultSetSqlType();
 if (dateSqlType == Types.DATE) {
-dateFormat = 
props.getProperty(QueryServices.DATE_FORMAT_ATTRIB,
+dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB,
 DateUtil.DEFAULT_DATE_FORMAT);
 } else if (dateSqlType == Types.TIME) {
-dateFormat = 
props.getProperty(QueryServices.TIME_FORMAT_ATTRIB,
+dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB,
 DateUtil.DEFAULT_TIME_FORMAT);
 } else {
-dateFormat = 
props.getProperty(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
+dateFormat = 
props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
 DateUtil.DEFAULT_TIMESTAMP_FORMAT);

 }
-String timeZoneId = 
props.getProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
+String timeZoneId = 
props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
 QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
 this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, 
dataType, timeZoneId);
 } else {
 this.dateTimeParser = null;
 }
 this.codec = codec;
-this.binaryEncoding = 
props.getProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,
+this.binaryEncoding = 
props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCOD

phoenix git commit: PHOENIX-4874 psql doesn't support date/time with values smaller than milliseconds(Rajeshbabu)

2018-09-23 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 91576cdd5 -> ab0b2f89e


PHOENIX-4874 psql doesn't support date/time with values smaller than 
milliseconds(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ab0b2f89
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ab0b2f89
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ab0b2f89

Branch: refs/heads/4.x-HBase-1.2
Commit: ab0b2f89e8b40d24e33abbd8b43e4959c51df555
Parents: 91576cd
Author: Rajeshbabu Chintaguntla 
Authored: Mon Sep 24 05:09:31 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Mon Sep 24 05:09:31 2018 +0530

--
 .../phoenix/util/csv/CsvUpsertExecutor.java | 20 +---
 .../phoenix/util/json/JsonUpsertExecutor.java   |  3 ++
 .../util/AbstractUpsertExecutorTest.java| 51 +++-
 3 files changed, 54 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ab0b2f89/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index 4f98ada..0b5881f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.util.csv;
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
+import java.sql.Timestamp;
 import java.sql.Types;
 import java.util.Base64;
 import java.util.List;
@@ -30,6 +31,7 @@ import javax.annotation.Nullable;
 import org.apache.commons.csv.CSVRecord;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.expression.function.EncodeFormat;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.IllegalDataException;
@@ -41,6 +43,7 @@ import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.DateUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.UpsertExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -125,9 +128,9 @@ public class CsvUpsertExecutor extends 
UpsertExecutor {
 private final String binaryEncoding;
 
 SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) {
-Properties props;
+ReadOnlyProps props;
 try {
-props = conn.getClientInfo();
+props = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
 } catch (SQLException e) {
 throw new RuntimeException(e);
 }
@@ -139,23 +142,23 @@ public class CsvUpsertExecutor extends 
UpsertExecutor {
 String dateFormat;
 int dateSqlType = dataType.getResultSetSqlType();
 if (dateSqlType == Types.DATE) {
-dateFormat = 
props.getProperty(QueryServices.DATE_FORMAT_ATTRIB,
+dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB,
 DateUtil.DEFAULT_DATE_FORMAT);
 } else if (dateSqlType == Types.TIME) {
-dateFormat = 
props.getProperty(QueryServices.TIME_FORMAT_ATTRIB,
+dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB,
 DateUtil.DEFAULT_TIME_FORMAT);
 } else {
-dateFormat = 
props.getProperty(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
+dateFormat = 
props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
 DateUtil.DEFAULT_TIMESTAMP_FORMAT);

 }
-String timeZoneId = 
props.getProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
+String timeZoneId = 
props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
 QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
 this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, 
dataType, timeZoneId);
 } else {
 this.dateTimeParser = null;
 }
 this.codec = codec;
-this.binaryEncoding = 
props.getProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,
+this.binaryEncoding = 
props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCOD

phoenix git commit: PHOENIX-4874 psql doesn't support date/time with values smaller than milliseconds(Rajeshbabu)

2018-09-23 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/master 4e8c9ab0c -> 34b8fe86b


PHOENIX-4874 psql doesn't support date/time with values smaller than 
milliseconds(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/34b8fe86
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/34b8fe86
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/34b8fe86

Branch: refs/heads/master
Commit: 34b8fe86b40f6cc2a05395640044e9dd7e1a1a8f
Parents: 4e8c9ab
Author: Rajeshbabu Chintaguntla 
Authored: Mon Sep 24 04:48:35 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Mon Sep 24 04:48:35 2018 +0530

--
 .../phoenix/util/csv/CsvUpsertExecutor.java | 20 +---
 .../phoenix/util/json/JsonUpsertExecutor.java   |  3 ++
 .../util/AbstractUpsertExecutorTest.java| 51 +++-
 3 files changed, 54 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/34b8fe86/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index 4f98ada..0b5881f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.util.csv;
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
+import java.sql.Timestamp;
 import java.sql.Types;
 import java.util.Base64;
 import java.util.List;
@@ -30,6 +31,7 @@ import javax.annotation.Nullable;
 import org.apache.commons.csv.CSVRecord;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.expression.function.EncodeFormat;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.IllegalDataException;
@@ -41,6 +43,7 @@ import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.DateUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.UpsertExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -125,9 +128,9 @@ public class CsvUpsertExecutor extends 
UpsertExecutor {
 private final String binaryEncoding;
 
 SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) {
-Properties props;
+ReadOnlyProps props;
 try {
-props = conn.getClientInfo();
+props = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
 } catch (SQLException e) {
 throw new RuntimeException(e);
 }
@@ -139,23 +142,23 @@ public class CsvUpsertExecutor extends 
UpsertExecutor {
 String dateFormat;
 int dateSqlType = dataType.getResultSetSqlType();
 if (dateSqlType == Types.DATE) {
-dateFormat = 
props.getProperty(QueryServices.DATE_FORMAT_ATTRIB,
+dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB,
 DateUtil.DEFAULT_DATE_FORMAT);
 } else if (dateSqlType == Types.TIME) {
-dateFormat = 
props.getProperty(QueryServices.TIME_FORMAT_ATTRIB,
+dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB,
 DateUtil.DEFAULT_TIME_FORMAT);
 } else {
-dateFormat = 
props.getProperty(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
+dateFormat = 
props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
 DateUtil.DEFAULT_TIMESTAMP_FORMAT);

 }
-String timeZoneId = 
props.getProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
+String timeZoneId = 
props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
 QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
 this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, 
dataType, timeZoneId);
 } else {
 this.dateTimeParser = null;
 }
 this.codec = codec;
-this.binaryEncoding = 
props.getProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,
+this.binaryEncoding = 
props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCOD

svn commit: r1836126 - in /phoenix/site: publish/index.html publish/language/datatypes.html publish/language/functions.html publish/language/index.html publish/news.html source/src/site/markdown/index

2018-07-17 Thread rajeshbabu
Author: rajeshbabu
Date: Tue Jul 17 19:35:10 2018
New Revision: 1836126

URL: http://svn.apache.org/viewvc?rev=1836126=rev
Log:
Updating 5.0.0 release announcement(Rajeshbabu)

Modified:
phoenix/site/publish/index.html
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html
phoenix/site/publish/news.html
phoenix/site/source/src/site/markdown/index.md
phoenix/site/source/src/site/markdown/news.md

Modified: phoenix/site/publish/index.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/index.html?rev=1836126=1836125=1836126=diff
==
--- phoenix/site/publish/index.html (original)
+++ phoenix/site/publish/index.html Tue Jul 17 19:35:10 2018
@@ -1,7 +1,7 @@
 
 
 
 
@@ -197,7 +197,7 @@
   
  
 
-News: PhoenixCon 2018 CFP is open. See 
the following page for more details PhoenixCon2018
 https://twitter.com/ApachePhoenix;> 
+News: Phoenix next major release 5.0.0 
has been released and is available for download here  
  https://twitter.com/ApachePhoenix;> 

   
  

Modified: phoenix/site/publish/language/datatypes.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/datatypes.html?rev=1836126=1836125=1836126=diff
==
--- phoenix/site/publish/language/datatypes.html (original)
+++ phoenix/site/publish/language/datatypes.html Tue Jul 17 19:35:10 2018
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/functions.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/functions.html?rev=1836126=1836125=1836126=diff
==
--- phoenix/site/publish/language/functions.html (original)
+++ phoenix/site/publish/language/functions.html Tue Jul 17 19:35:10 2018
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/index.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/index.html?rev=1836126=1836125=1836126=diff
==
--- phoenix/site/publish/language/index.html (original)
+++ phoenix/site/publish/language/index.html Tue Jul 17 19:35:10 2018
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/news.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/news.html?rev=1836126=1836125=1836126=diff
==
--- phoenix/site/publish/news.html (original)
+++ phoenix/site/publish/news.html Tue Jul 17 19:35:10 2018
@@ -1,7 +1,7 @@
 
 
 
 
@@ -171,6 +171,10 @@
  
   

+   https://blogs.apache.org/phoenix/entry/apache-phoenix-releases-next-major;>Announcing
 Phoenix 5.0.0 released (July 4, 2018) 
+
+   
+   
https://phoenix.apache.org/phoenixcon-2018;>PhoenixCon 2018 announced for 
June 18th, 2018 (March 24, 2018) 
 


Modified: phoenix/site/source/src/site/markdown/index.md
URL: 
http://svn.apache.org/viewvc/phoenix/site/source/src/site/markdown/index.md?rev=1836126=1836125=1836126=diff
==
--- phoenix/site/source/src/site/markdown/index.md (original)
+++ phoenix/site/source/src/site/markdown/index.md Tue Jul 17 19:35:10 2018
@@ -45,7 +45,7 @@
 
 
 News:
-PhoenixCon 2018 CFP is open. See the following page for 
more details PhoenixCon2018
 
+Phoenix next major release 5.0.0 has been released and 
is available for download here  
  
 
 ## Overview
 Apache Phoenix enables OLTP and operational analytics in Hadoop for low 
latency applications by combining the best of both worlds:

Modified: phoenix/site/source/src/site/markdown/news.md
URL: 
http://svn.apache.org/viewvc/phoenix/site/source/src/site/markdown/news.md?rev=1836126=1836125=1836126=diff
==
--- phoenix/site/source/src/site/markdown/news.md (original)
+++ phoenix/site/source/src/site/markdown/news.md Tue Jul 17 19:35:10 2018
@@ -1,6 +1,8 @@
 # Apache Phoenix News
 
 
+ [Announcing Phoenix 5.0.0 
released](https://blogs.apache.org/phoenix/entry/apache-phoenix-releases-next-major)
 (July 4, 2018)
+
  [PhoenixCon 2018 announced for June 18th, 
2018](https://phoenix.apache.org/phoenixcon-2018) (March 24, 2018)
 
  [Announcing CDH-compatible Phoenix 4.13.2 
released](https://blogs.apache.org/phoenix/entry/announcing-cdh-compatible-phoenix-4)
 (January 22, 2018)




svn commit: r1835864 - in /phoenix/site: publish/download.html publish/language/datatypes.html publish/language/functions.html publish/language/index.html source/src/site/markdown/download.md

2018-07-13 Thread rajeshbabu
Author: rajeshbabu
Date: Fri Jul 13 23:54:14 2018
New Revision: 1835864

URL: http://svn.apache.org/viewvc?rev=1835864=rev
Log:
Added 5.0.0-HBase-2.0 release to download page

Modified:
phoenix/site/publish/download.html
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html
phoenix/site/source/src/site/markdown/download.md

Modified: phoenix/site/publish/download.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/download.html?rev=1835864=1835863=1835864=diff
==
--- phoenix/site/publish/download.html (original)
+++ phoenix/site/publish/download.html Fri Jul 13 23:54:14 2018
@@ -1,7 +1,7 @@
 
 
 
 
@@ -166,7 +166,7 @@
  Phoenix Downloads
  
 The below table lists mirrored release artifacts and their associated 
hashes and signatures available ONLY at apache.org. The keys used to sign 
releases can be found in our published https://www.apache.org/dist/phoenix/KEYS;>KEYS file. See our 
installation instructions here, our release 
notes here, and a list of fixes and new 
features https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12334393projectId=12315120;>here.
 Follow https://www.apache.org/dyn/closer.cgi#verify;>Verify the Integrity of the 
Files for how to verify your mirrored downloads. 
-Current release 4.14 can run on Apache HBase 0.98, 1.1, 1.2, 1.3 and 1.4 
and CDH HBase 5.11, 5.12, 5.13 and 5.14. Apache HBase 2.0 is supported by 
5.0.0-alpha. Please follow the appropriate link depending on your HBase 
version.  
+Current release 4.14 can run on Apache HBase 0.98, 1.1, 1.2, 1.3 and 1.4 
and CDH HBase 5.11, 5.12, 5.13 and 5.14. Apache HBase 2.0 is supported by 
5.0.0. Please follow the appropriate link depending on your HBase version.  
  
   

@@ -182,6 +182,7 @@
  
 
 //<![CDATA[
+addRelease('5.0.0-HBase-2.0','04/jul/2018');
 addRelease('4.14.0-HBase-1.4','09/jun/2018');
 addRelease('4.14.0-HBase-1.3','09/jun/2018');
 addRelease('4.14.0-HBase-1.2','09/jun/2018');

Modified: phoenix/site/publish/language/datatypes.html
URL: 
<a  rel="nofollow" href="http://svn.apache.org/viewvc/phoenix/site/publish/language/datatypes.html?rev=1835864&r1=1835863&r2=1835864&view=diff">http://svn.apache.org/viewvc/phoenix/site/publish/language/datatypes.html?rev=1835864&r1=1835863&r2=1835864&view=diff</a>
==
--- phoenix/site/publish/language/datatypes.html (original)
+++ phoenix/site/publish/language/datatypes.html Fri Jul 13 23:54:14 2018
@@ -1,7 +1,7 @@
 
 <!DOCTYPE html>
 <!--
- Generated by Apache Maven Doxia at 2018-06-28
+ Generated by Apache Maven Doxia at 2018-07-14
  Rendered using Reflow Maven Skin 1.1.0 
(<a  rel="nofollow" href="http://andriusvelykis.github.io/reflow-maven-skin">http://andriusvelykis.github.io/reflow-maven-skin</a>)
 -->
 <html  xml:lang="en" lang="en">

Modified: phoenix/site/publish/language/functions.html
URL: 
<a  rel="nofollow" href="http://svn.apache.org/viewvc/phoenix/site/publish/language/functions.html?rev=1835864&r1=1835863&r2=1835864&view=diff">http://svn.apache.org/viewvc/phoenix/site/publish/language/functions.html?rev=1835864&r1=1835863&r2=1835864&view=diff</a>
==
--- phoenix/site/publish/language/functions.html (original)
+++ phoenix/site/publish/language/functions.html Fri Jul 13 23:54:14 2018
@@ -1,7 +1,7 @@
 
 <!DOCTYPE html>
 <!--
- Generated by Apache Maven Doxia at 2018-06-28
+ Generated by Apache Maven Doxia at 2018-07-14
  Rendered using Reflow Maven Skin 1.1.0 
(<a  rel="nofollow" href="http://andriusvelykis.github.io/reflow-maven-skin">http://andriusvelykis.github.io/reflow-maven-skin</a>)
 -->
 <html  xml:lang="en" lang="en">

Modified: phoenix/site/publish/language/index.html
URL: 
<a  rel="nofollow" href="http://svn.apache.org/viewvc/phoenix/site/publish/language/index.html?rev=1835864&r1=1835863&r2=1835864&view=diff">http://svn.apache.org/viewvc/phoenix/site/publish/language/index.html?rev=1835864&r1=1835863&r2=1835864&view=diff</a>
==
--- phoenix/site/publish/language/index.html (original)
+++ phoenix/site/publish/language/index.html Fri Jul 13 23:54:14 2018
@@ -1,7 +1,7 @@
 
 <!DOCTYPE html>
 <!--
- Generated by Apache Maven Doxia at 2018-06-28
+ Generated by Apache Maven Doxia at 2018-07-14
  Rendered using Reflow Maven Skin 1.1.0 
(<a  rel="nofollow" href="http://andriusvelykis.github.io/reflow-ma

[3/3] phoenix git commit: Changed version to 5.1.0-HBase-2.0-SNAPSHOT

2018-07-03 Thread rajeshbabu
Changed version to 5.1.0-HBase-2.0-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ae234304
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ae234304
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ae234304

Branch: refs/heads/5.x-HBase-2.0
Commit: ae2343042e022e24a69cf7754c63844af49085d3
Parents: 8a819c6
Author: Rajeshbabu Chintaguntla 
Authored: Wed Jul 4 07:50:39 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Wed Jul 4 07:50:39 2018 +0530

--
 phoenix-assembly/pom.xml   | 2 +-
 phoenix-client/pom.xml | 2 +-
 phoenix-core/pom.xml   | 2 +-
 phoenix-flume/pom.xml  | 2 +-
 phoenix-hive/pom.xml   | 2 +-
 phoenix-kafka/pom.xml  | 2 +-
 phoenix-load-balancer/pom.xml  | 2 +-
 phoenix-pherf/pom.xml  | 2 +-
 phoenix-pig/pom.xml| 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml| 2 +-
 phoenix-server/pom.xml | 2 +-
 phoenix-spark/pom.xml  | 2 +-
 phoenix-tracing-webapp/pom.xml | 2 +-
 pom.xml| 2 +-
 15 files changed, 15 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae234304/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 77b1b83..5cbe55f 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-HBase-2.0
+5.1.0-HBase-2.0-SNAPSHOT
   
   phoenix-assembly
   Phoenix Assembly

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae234304/phoenix-client/pom.xml
--
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 7b53483..6189bba 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-HBase-2.0
+5.1.0-HBase-2.0-SNAPSHOT
   
   phoenix-client
   Phoenix Client

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae234304/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 96610fd..4065564 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-HBase-2.0
+5.1.0-HBase-2.0-SNAPSHOT
   
   phoenix-core
   Phoenix Core

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae234304/phoenix-flume/pom.xml
--
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index affd5ce..1d66c90 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-HBase-2.0
+5.1.0-HBase-2.0-SNAPSHOT
   
   phoenix-flume
   Phoenix - Flume

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae234304/phoenix-hive/pom.xml
--
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index d2c995a..08ad855 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-HBase-2.0
+5.1.0-HBase-2.0-SNAPSHOT
   
   phoenix-hive
   Phoenix - Hive

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae234304/phoenix-kafka/pom.xml
--
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 8abf6fe..cde8b8b 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@

org.apache.phoenix
phoenix
-   5.0.0-HBase-2.0
+   5.1.0-HBase-2.0-SNAPSHOT

phoenix-kafka
Phoenix - Kafka

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae234304/phoenix-load-balancer/pom.xml
--
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index 53a370e..4789c14 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-HBase-2.0
+5.1.0-HBase-2.0-SNAPSHOT
   
   phoenix-load-balancer
   Phoenix Load Balancer

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae234304/phoenix-pherf/pom.xml
--
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index b5d5631..b5880f1 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7

[2/3] phoenix git commit: Updating KEYS for rajeshb...@apache.org

2018-07-03 Thread rajeshbabu
Updating KEYS for rajeshb...@apache.org


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8a819c6c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8a819c6c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8a819c6c

Branch: refs/heads/5.x-HBase-2.0
Commit: 8a819c6c3b4befce190c6ac759f744df511de61d
Parents: 479fab0
Author: Rajeshbabu Chintaguntla 
Authored: Tue Jun 26 10:34:08 2018 -0700
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Jun 26 10:34:08 2018 -0700

--
 KEYS | 58 ++
 1 file changed, 58 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8a819c6c/KEYS
--
diff --git a/KEYS b/KEYS
index dca2b38..ce3fcae 100644
--- a/KEYS
+++ b/KEYS
@@ -129,3 +129,61 @@ 
b4Ki2TbCKOPwRYX6+b2vmmOUlZ28yjeAhzHKXS9lh7nY2T+cv5cwkPZ1uw2AKG55
 pcl2PvSVaZeoTjguW8BqFjkEzA==
 =p/IB
 -END PGP PUBLIC KEY BLOCK-
+pub   4096R/AAEDBD7B 2018-06-11
+uid  Rajeshbabu Chintaguntla (CODE SIGNING KEY) 

+sig 3AAEDBD7B 2018-06-11  Rajeshbabu Chintaguntla (CODE SIGNING KEY) 

+sub   4096R/2A1817DB 2018-06-11
+sig  AAEDBD7B 2018-06-11  Rajeshbabu Chintaguntla (CODE SIGNING KEY) 

+
+-BEGIN PGP PUBLIC KEY BLOCK-
+Version: GnuPG v1
+
+mQINBFseLj8BEADgUf2qoeYAE3MzcxEDysTm0fo/qtHJXTvZexVi+w/Xg86yvSuw
+WzChpYY8Z04cY+ruXRmwfvsfH//YlquJr02uHavojeqKsOW2kV03czh16ktNWwtS
+v7OIU3RzCFt67D2wtMJzbiA9l1WJyzvOx7xnV+uovUNsURYc42YEnwgMJPUQUz0D
+4x8SBKBmL31DELZ72PdgsMSNs7xBrvsXqybDlWkFFkTZWQ6ZwGE6L22ddmOT4N8e
+0FMxIJRrCPn7xMKF2xUEE05sKw+lxLbCO38gyc42AeEVhP+qCvG3E1ZhvqNe/l1y
+LHm4vBNxmR9bgXHf2BvmSBKdGeI2oHM7BQzZ071nH3PHnwr3ksTvNpqW9FkgzjEp
+02fv/n9iANp4ZYCphOdm1Ea9iGKvn0M44seoktT2Gh0Eof3hNOsHJcOCmU494w4I
+77zeCFHbLEVpCMNDLzHccmBBD5FNoOQRQouTuy8aWXsZeRUQrzLw3fF5KcxEtTaI
+SzYKmbGJayeYDpLDvZcU7KZyKm3X+bHWWXlU4sG37hCDfEcQjSwJBXyDmWMpqw7R
+FC6pFHCjIXofoNFdY6nMOeQ89B8hSCzgBb5Dh8hxwsGIjSyNF63reJ64EAKAi69s
+EAmWx1f3ivi54ZSHhb2mQ/vfdy84xUk8RwYF6WBNmR3I+DRQNKpKDvUxSwARAQAB
+tEJSYWplc2hiYWJ1IENoaW50YWd1bnRsYSAoQ09ERSBTSUdOSU5HIEtFWSkgPHJh
+amVzaGJhYnVAYXBhY2hlLm9yZz6JAjgEEwECACIFAlseLj8CGwMGCwkIBwMCBhUI
+AgkKCwQWAgMBAh4BAheAAAoJEDGP2Guq7b1774QQAN2//IV3qQNGoGU2Ihvo3F3n
+6YkW8y9qy3+Gz17mNhcBgtxZitf8xdch5JqDh/TU/LpV4z9guxnM3dNuwvZ3tbIw
+qog0UTDGoeCe4YokRI0kl19a1rn575PQyYu6TEEJrgTKA/yzz8Ck2UbEJ6gJMkxj
+DW5EXFm0QN5Eg+NMrsxcDuf+9hFJ1LagFXZdhPfI9+XzuDDXgZaTXxhd8QQu0d/d
+dcrEc1Pnk65CdDHHezdtMvm0WTw+staTO6/bF6QonM+qgpsZaPxtnU3/rMamGT5k
+rRG+STpLDdz4EUkk5+ZMSJei7glxv4Nx+16yi2YoAtRgoxyjPKBG8v6GCSWXu2K+
+LYoUZmQsnUGgvH2E8V/7XAh68H4/YSTnqhn8TEq5771aya9PzhhvHipSHkWxIkR2
+UjmnJjTkBCXkM4ju4hc/QHFa2J/tvzcAx0WRCUqioE6i/JI72C+nmgRZ4TUYy/nS
+tpAH4MSlWCwEWHEKDs85rKIDTL8qAi4X4HAreSsZp8+igYsr0zkhLjAhaJB6qMpB
+oek0Ke7O1L4m5AEzMayJzMdqdSKK4X3rzW8Qu7Wc1e+oN3BXEWfdGOYB3Mrssfhp
+ZvJEHrTwuMBtb2bAdoiCG5Q/i6mozfzfYaGHd5i7J2ujDdC9Qie/lRyQlwryy1gb
+hxGRAScz2xfMa/XNCawCuQINBFseLj8BEAChyrxmBZsR/gKi0Wj6TaLGENMf2mT7
+hiVTnKYU0yAgA1LsDGMVyF+dzwNgLL600LLqrqFMTVqrOiEF3hKLAQ3cjxSE7rwg
+0X02ZCOX69Y3+7/xOAMUT93Aqk1WZYUN28uCGO+6Y7Q4oT/V8OZbixSzaIAJ3jyI
+jGElDaw+VMYydMXU7Z9c4rhIjjEE3AkKtlB5KGAKHTUYi8GiJhqrmy02jhtHSXT/
+AwAIIxT8gZ5Rgx4MErIjGn7fp+fvbMYK0FbcbreO7flyls6dHwWAGcI5VbxWCCZu
+5YZlpo8z/iYkGLB2QV/gtzp7Wm4lZkMX61j+PTpzNO7I6rRQckhyeFF1ZcPK73ey
+EmjwWBmo7K/iMPWCYFrU7/ybdXf+TDzuyUYosJUSqG8cIERHBIQc5E4TsdNd3hJR
+TPNF9YzXv/iJK9PDO962zLS3cP373/QCiU+Q8rAhqfrX9Mygobsvc1PopPVn66hc
+BzwUUuLUY/5du2/hh8BC99BIa3BJ/wxCN7kc1UGdyyffoBB5gcnOC14r7SbJSVNU
+ymw1y8UXC2XQQyKFixUCsJfY94ZwqO7cmPxsWBc+DUtL7AP69ZkzYFmPlIwh0f+G
+l8/m4ai+tWnxIUhMfyvwyo2E4AJ+AGCfZwUdxB+uCD7AbTy/Sef7vv0zXGTk7brb
+am8myY/u0xaF8QARAQABiQIfBBgBAgAJBQJbHi4/AhsMAAoJEDGP2Guq7b17EZMQ
+AJAWU7hFSZP1tlwDziRN2FSSd+jHV59oBxzC1MONhT9c0VneodzER/NaK+6N2H7+
+CpQRV6ePzG3iUPj2Wtz8U/eX42Ia8OwnmLM0voJH529ZLPv5tVxqMV+UgbssDMWu
+nTeLw/pg4Vy8bec0HHPMwsmooQJQXu+e9/5DoRrepaGMFXu+BpKOQDN8Gvlq0BEA
+v9ojEbhRlxflV11VBq57bj1d+5F+JOKexqJGsbZvnY86j7hAY9BIKUKaA4HmhKX2
+vRjM6izxVA0ivJM5X/3qHMakjARrUFnbQkfh+dWQo5NZfQMp5m6uhQwwfhGIYhII
+iqk0+zhbgix9DfK7muLdEvE4TxAQO4sc8s/EtEmfHFWVtZA0mf+6dqU9WrigMEq3
+yA1KmQYUjXR+/iax60nd5SacdFKyehhFSAE7dC8Sqp/wT+fkgcYzUEzkiMVqx53n
+PXfiDPR3VzC0FwBLgXyJLf3B9lGuICia1wcNc4oKiA68kxc3EPbcYxLtVMKRTvBf
+dxN6Siuv6yJzuGJaj/C1fPGVwzbt/x0L7CQw8W5k3XzZlp7bCYyd7AXNZZOvAEQ9
+mE6f5bevE4Bock2Ee72bg391F7sf/bFXAouOiUgyoVHmW2ZSMfLWx6fP8Dq2AmAM
+hP9muXjqvjO7SxxLReObjt/gs3HtjFUnJzTX/TlAaMq9
+=rPAq
+-END PGP PUBLIC KEY BLOCK-



[1/3] phoenix git commit: Updating version to 5.0.0-HBase-2.0

2018-07-03 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 2d44700fa -> ae2343042


Updating version to 5.0.0-HBase-2.0


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/479fab07
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/479fab07
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/479fab07

Branch: refs/heads/5.x-HBase-2.0
Commit: 479fab07f5e5798bfafcc7aebbe93669d804b72b
Parents: 2d44700
Author: Rajeshbabu Chintaguntla 
Authored: Tue Jun 26 10:33:24 2018 -0700
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Jun 26 10:33:24 2018 -0700

--
 phoenix-assembly/pom.xml   | 2 +-
 phoenix-client/pom.xml | 2 +-
 phoenix-core/pom.xml   | 2 +-
 phoenix-flume/pom.xml  | 2 +-
 phoenix-hive/pom.xml   | 2 +-
 phoenix-kafka/pom.xml  | 2 +-
 phoenix-load-balancer/pom.xml  | 2 +-
 phoenix-pherf/pom.xml  | 2 +-
 phoenix-pig/pom.xml| 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml| 2 +-
 phoenix-server/pom.xml | 2 +-
 phoenix-spark/pom.xml  | 2 +-
 phoenix-tracing-webapp/pom.xml | 2 +-
 pom.xml| 2 +-
 15 files changed, 15 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/479fab07/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 7528ef9..77b1b83 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-SNAPSHOT
+5.0.0-HBase-2.0
   
   phoenix-assembly
   Phoenix Assembly

http://git-wip-us.apache.org/repos/asf/phoenix/blob/479fab07/phoenix-client/pom.xml
--
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index ca95e62..7b53483 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-SNAPSHOT
+5.0.0-HBase-2.0
   
   phoenix-client
   Phoenix Client

http://git-wip-us.apache.org/repos/asf/phoenix/blob/479fab07/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 24e34f3..96610fd 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-SNAPSHOT
+5.0.0-HBase-2.0
   
   phoenix-core
   Phoenix Core

http://git-wip-us.apache.org/repos/asf/phoenix/blob/479fab07/phoenix-flume/pom.xml
--
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 0c0386a..affd5ce 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-SNAPSHOT
+5.0.0-HBase-2.0
   
   phoenix-flume
   Phoenix - Flume

http://git-wip-us.apache.org/repos/asf/phoenix/blob/479fab07/phoenix-hive/pom.xml
--
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 0bc582c..d2c995a 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-SNAPSHOT
+5.0.0-HBase-2.0
   
   phoenix-hive
   Phoenix - Hive

http://git-wip-us.apache.org/repos/asf/phoenix/blob/479fab07/phoenix-kafka/pom.xml
--
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 98bb858..8abf6fe 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@

org.apache.phoenix
phoenix
-   5.0.0-SNAPSHOT
+   5.0.0-HBase-2.0

phoenix-kafka
Phoenix - Kafka

http://git-wip-us.apache.org/repos/asf/phoenix/blob/479fab07/phoenix-load-balancer/pom.xml
--
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index 169833d..53a370e 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-5.0.0-SNAPSHOT
+5.0.0-HBase-2.0
   
   phoenix-load-balancer
   Phoenix Load Balancer

http://git-wip-us.apache.org/repos/asf/phoenix/blob/479fab07/phoenix-pherf/pom.xml
--
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 754a99f..b5d5631 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.

[phoenix] Git Push Summary

2018-07-03 Thread rajeshbabu
Repository: phoenix
Updated Tags:  refs/tags/v5.0.0-HBase-2.0 [created] 481ed81fd


svn commit: r27901 - /dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/ /release/phoenix/apache-phoenix-5.0.0-HBase-2.0/

2018-07-03 Thread rajeshbabu
Author: rajeshbabu
Date: Tue Jul  3 17:04:35 2018
New Revision: 27901

Log:
Apache Phoenix 5.0.0-HBase-2.0 release

Added:
release/phoenix/apache-phoenix-5.0.0-HBase-2.0/
  - copied from r27900, dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/
Removed:
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/



svn commit: r27744 - in /dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1: ./ bin/ src/

2018-06-26 Thread rajeshbabu
Author: rajeshbabu
Date: Tue Jun 26 17:48:02 2018
New Revision: 27744

Log:
Adding binary and source of 5.0.0 rc1

Added:
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz
   (with props)

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/src/

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz
   (with props)

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.sha256

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.sha512

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc
 Tue Jun 26 17:48:02 2018
@@ -0,0 +1,11 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v2
+
+iQEcBAABCAAGBQJbMnroAAoJEO2Bo2zjpl28xwoH/2yuNnD1HVVqh3vsmB8XN8Tg
+nLFqJNKAVKhQqY183NdM7EQw9czgVhSQP3zbQJtiWaiSqgqxoU0Wh+3uJajE0TVY
+KQivvBur5Gu43uDwi2/UU8V97r6B6hHwgTwbpuJYkZz3WAnYy/Ro1qF5+bR8bL/W
+lQSAu4JGWJuez/BU7H3pPVPJ10YzxngJm7yfWqO5jOsqSyaz23JJLqc3frBplTTP
+EOX5kIATN5+PvsxM/82TrEA0WGeE83EVPo+5+PC8kb/eXGtGkwpk9H1rNEkkOXSS
+cKm7AbPETcPp99I1BqoA0b9HbXKuP8iamZmm3PWrxSq51eVInbv8+S/PvlOQ2xs=
+=ZYFd
+-END PGP SIGNATURE-

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256
 Tue Jun 26 17:48:02 2018
@@ -0,0 +1 @@
+SHA256(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 
2f8eefa791078f52d415253bf592bfb1a4fadcb6d4c3e5fc9a0cb485c166ed4e

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
 Tue Jun 26 17:48:02 2018
@@ -0,0 +1 @@
+SHA512(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 
5c21612babd82982b1065488055dbd71b4f4501b490248f7d5576f164984e54791ffcbabf3932556cd167c6d16c11391f390c70f09a0dd10f84cce9280470506

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc1/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc
 Tue Jun 26 17:48:02 2018
@@ -0,0 +1,11 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v2
+
+iQEcBAABCAAGBQJbMnr1AAoJEO2Bo2zjpl28JH4IAK1FpZqmsmqy/HuhNCR7QbYA
+MvaRLee9kX7eTBJ2otONN+MTJAg5gyFhDBmPk6taQ6Rsq9g/i9g8zEe+oitm2kw9
+2INqho4it22v/nuNgTd5zSY9qyhoqMLiQ2DrX3tFnjYJHPku01tYGKbThf3QD6Tj
+ZFlAWs07cPusZdEi+RJX3KExNJHiGJldVSxLdfEgx94F/c8SMXv46eazdiXv5GIQ
+P3GXrO2NS7jBGuZm9Zk1+9qy1xdZMCo+nDGS3KQmH0adb1YI7JNtfGyMbrcGqAgd

[phoenix] Git Push Summary

2018-06-26 Thread rajeshbabu
Repository: phoenix
Updated Tags:  refs/tags/v5.0.0-HBase-2.0-rc1 [created] 6f058febb


phoenix git commit: PHOENIX-4795 Fix failing pherf tests in 5.x branch(Rajeshbabu)

2018-06-26 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 cb962f946 -> 2d44700fa


PHOENIX-4795 Fix failing pherf tests in 5.x branch(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2d44700f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2d44700f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2d44700f

Branch: refs/heads/5.x-HBase-2.0
Commit: 2d44700fac405abd017f0201ff4a4fa814204b1f
Parents: cb962f9
Author: Rajeshbabu Chintaguntla 
Authored: Tue Jun 26 06:43:09 2018 -0700
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Jun 26 06:43:09 2018 -0700

--
 phoenix-pherf/src/test/resources/hbase-site.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d44700f/phoenix-pherf/src/test/resources/hbase-site.xml
--
diff --git a/phoenix-pherf/src/test/resources/hbase-site.xml 
b/phoenix-pherf/src/test/resources/hbase-site.xml
index 4972828..d4bebb7 100644
--- a/phoenix-pherf/src/test/resources/hbase-site.xml
+++ b/phoenix-pherf/src/test/resources/hbase-site.xml
@@ -22,4 +22,8 @@
     <name>phoenix.query.threadPoolSize</name>
     <value>128</value>
   </property>
+  <property>
+    <name>hbase.localcluster.assign.random.ports</name>
+    <value>true</value>
+  </property>
 </configuration>
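The flag added above, hbase.localcluster.assign.random.ports, makes the HBase mini-cluster started by the pherf ITs bind its master and regionserver ports randomly rather than on the fixed defaults, presumably so concurrent or leftover processes on the same host do not cause bind conflicts. A minimal sketch of setting the same flag programmatically (hypothetical helper class, not part of the Phoenix test code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical helper: build a configuration for a local/mini cluster with
// random port assignment enabled, mirroring the hbase-site.xml change above.
public class RandomPortsConfig {
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.localcluster.assign.random.ports", true);
        return conf;
    }
}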



phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)

2018-06-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 61affd431 -> 8cceea621


PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found 
and added results for scan ordered queries(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8cceea62
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8cceea62
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8cceea62

Branch: refs/heads/5.x-HBase-2.0
Commit: 8cceea6214297659e79d89f762c07349f84b74e9
Parents: 61affd4
Author: Rajeshbabu Chintaguntla 
Authored: Fri Jun 15 15:41:21 2018 -0700
Committer: Rajeshbabu Chintaguntla 
Committed: Fri Jun 15 15:41:21 2018 -0700

--
 .../phoenix/coprocessor/GroupedAggregateRegionObserver.java  | 8 
 1 file changed, 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8cceea62/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 1ded543..e58407f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -542,14 +542,6 @@ public class GroupedAggregateRegionObserver extends 
BaseScannerRegionObserver im
 currentKey.getLength(), SINGLE_COLUMN_FAMILY, 
SINGLE_COLUMN,
 AGG_TIMESTAMP, value, 0, value.length);
 results.add(keyValue);
-if (logger.isInfoEnabled()) {
-logger.info(LogUtil.addCustomAnnotations("Adding new 
aggregate row: "
-+ keyValue
-+ ",for current key "
-+ Bytes.toStringBinary(currentKey.get(), 
currentKey.getOffset(),
-currentKey.getLength()) + ", aggregated 
values: "
-+ Arrays.asList(rowAggregators), 
ScanUtil.getCustomAnnotations(scan)));
-}
 // If we're at an aggregation boundary, reset the
 // aggregators and
 // aggregate with the current result (which is not a part 
of
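The hunk above drops the per-row info-level logging from the grouped-aggregate scan path; the commit title describes the intent as reducing it to debug level. For reference, a debug-guarded version of such logging would look roughly like the sketch below (hypothetical class using JDK logging, not the committed Phoenix code):

import java.util.Arrays;
import java.util.logging.Level;
import java.util.logging.Logger;

// Hypothetical sketch of the guard-expensive-log-messages pattern the commit
// title refers to: the level check skips the string building and Arrays work
// entirely unless fine-grained logging is actually enabled.
class AggregateRowLogging {
    private static final Logger LOG = Logger.getLogger(AggregateRowLogging.class.getName());

    static void logAggregateRow(byte[] rowKey, Object[] aggregators) {
        if (LOG.isLoggable(Level.FINE)) {
            LOG.fine("Adding new aggregate row for key " + Arrays.toString(rowKey)
                    + ", aggregated values: " + Arrays.asList(aggregators));
        }
    }
}

Either way the point is the same: building a log message for every aggregated row is too expensive for the scan hot path unless the lower log level is explicitly enabled.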



phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)

2018-06-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 6f4a48fe7 -> a0ef6613d


PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found 
and added results for scan ordered queries(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a0ef6613
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a0ef6613
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a0ef6613

Branch: refs/heads/4.x-HBase-1.3
Commit: a0ef6613dfde647ac9b680744b4628dd2423c33f
Parents: 6f4a48f
Author: Rajeshbabu Chintaguntla 
Authored: Fri Jun 15 15:40:51 2018 -0700
Committer: Rajeshbabu Chintaguntla 
Committed: Fri Jun 15 15:40:51 2018 -0700

--
 .../phoenix/coprocessor/GroupedAggregateRegionObserver.java  | 8 
 1 file changed, 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a0ef6613/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 86ab275..aefe916 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -534,14 +534,6 @@ public class GroupedAggregateRegionObserver extends 
BaseScannerRegionObserver {
 currentKey.getLength(), SINGLE_COLUMN_FAMILY, 
SINGLE_COLUMN,
 AGG_TIMESTAMP, value, 0, value.length);
 results.add(keyValue);
-if (logger.isInfoEnabled()) {
-logger.info(LogUtil.addCustomAnnotations("Adding new 
aggregate row: "
-+ keyValue
-+ ",for current key "
-+ Bytes.toStringBinary(currentKey.get(), 
currentKey.getOffset(),
-currentKey.getLength()) + ", aggregated 
values: "
-+ Arrays.asList(rowAggregators), 
ScanUtil.getCustomAnnotations(scan)));
-}
 // If we're at an aggregation boundary, reset the
 // aggregators and
 // aggregate with the current result (which is not a part 
of



phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)

2018-06-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 cf1a1a683 -> 179bea2c1


PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found 
and added results for scan ordered queries(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/179bea2c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/179bea2c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/179bea2c

Branch: refs/heads/4.x-HBase-1.2
Commit: 179bea2c186e1286fe7492423751e748d21afc5c
Parents: cf1a1a6
Author: Rajeshbabu Chintaguntla 
Authored: Fri Jun 15 15:40:25 2018 -0700
Committer: Rajeshbabu Chintaguntla 
Committed: Fri Jun 15 15:40:25 2018 -0700

--
 .../phoenix/coprocessor/GroupedAggregateRegionObserver.java  | 8 
 1 file changed, 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/179bea2c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 86ab275..aefe916 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -534,14 +534,6 @@ public class GroupedAggregateRegionObserver extends 
BaseScannerRegionObserver {
 currentKey.getLength(), SINGLE_COLUMN_FAMILY, 
SINGLE_COLUMN,
 AGG_TIMESTAMP, value, 0, value.length);
 results.add(keyValue);
-if (logger.isInfoEnabled()) {
-logger.info(LogUtil.addCustomAnnotations("Adding new 
aggregate row: "
-+ keyValue
-+ ",for current key "
-+ Bytes.toStringBinary(currentKey.get(), 
currentKey.getOffset(),
-currentKey.getLength()) + ", aggregated 
values: "
-+ Arrays.asList(rowAggregators), 
ScanUtil.getCustomAnnotations(scan)));
-}
 // If we're at an aggregation boundary, reset the
 // aggregators and
 // aggregate with the current result (which is not a part 
of



phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)

2018-06-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 a6125a3b1 -> 441dfbd27


PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found 
and added results for scan ordered queries(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/441dfbd2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/441dfbd2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/441dfbd2

Branch: refs/heads/4.x-HBase-1.1
Commit: 441dfbd2707c0b9f4ae9d1d61a1cdd585f90224c
Parents: a6125a3
Author: Rajeshbabu Chintaguntla 
Authored: Fri Jun 15 15:39:57 2018 -0700
Committer: Rajeshbabu Chintaguntla 
Committed: Fri Jun 15 15:39:57 2018 -0700

--
 .../phoenix/coprocessor/GroupedAggregateRegionObserver.java  | 8 
 1 file changed, 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/441dfbd2/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 86ab275..aefe916 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -534,14 +534,6 @@ public class GroupedAggregateRegionObserver extends 
BaseScannerRegionObserver {
 currentKey.getLength(), SINGLE_COLUMN_FAMILY, 
SINGLE_COLUMN,
 AGG_TIMESTAMP, value, 0, value.length);
 results.add(keyValue);
-if (logger.isInfoEnabled()) {
-logger.info(LogUtil.addCustomAnnotations("Adding new 
aggregate row: "
-+ keyValue
-+ ",for current key "
-+ Bytes.toStringBinary(currentKey.get(), 
currentKey.getOffset(),
-currentKey.getLength()) + ", aggregated 
values: "
-+ Arrays.asList(rowAggregators), 
ScanUtil.getCustomAnnotations(scan)));
-}
 // If we're at an aggregation boundary, reset the
 // aggregators and
 // aggregate with the current result (which is not a part 
of



phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)

2018-06-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 bfd083396 -> 175fe3fae


PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found 
and added results for scan ordered queries(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/175fe3fa
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/175fe3fa
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/175fe3fa

Branch: refs/heads/4.x-HBase-0.98
Commit: 175fe3fae0577fdc769c8ffbada9a3c2e2d6fb91
Parents: bfd0833
Author: Rajeshbabu Chintaguntla 
Authored: Fri Jun 15 15:39:23 2018 -0700
Committer: Rajeshbabu Chintaguntla 
Committed: Fri Jun 15 15:39:23 2018 -0700

--
 .../phoenix/coprocessor/GroupedAggregateRegionObserver.java  | 8 
 1 file changed, 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/175fe3fa/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index f69e30c..cfec0cd 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -531,14 +531,6 @@ public class GroupedAggregateRegionObserver extends 
BaseScannerRegionObserver {
 currentKey.getLength(), SINGLE_COLUMN_FAMILY, 
SINGLE_COLUMN,
 AGG_TIMESTAMP, value, 0, value.length);
 results.add(keyValue);
-if (logger.isInfoEnabled()) {
-logger.info(LogUtil.addCustomAnnotations("Adding new 
aggregate row: "
-+ keyValue
-+ ",for current key "
-+ Bytes.toStringBinary(currentKey.get(), 
currentKey.getOffset(),
-currentKey.getLength()) + ", aggregated 
values: "
-+ Arrays.asList(rowAggregators), 
ScanUtil.getCustomAnnotations(scan)));
-}
 // If we're at an aggregation boundary, reset the
 // aggregators and
 // aggregate with the current result (which is not a part 
of



phoenix git commit: PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found and added results for scan ordered queries(Rajeshbabu)

2018-06-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/master c233c15c1 -> 6acdae0ff


PHOENIX-4786 Reduce log level to debug when logging new aggregate row key found 
and added results for scan ordered queries(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6acdae0f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6acdae0f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6acdae0f

Branch: refs/heads/master
Commit: 6acdae0ff1a63980f40fe1b794d40ab949cc423d
Parents: c233c15
Author: Rajeshbabu Chintaguntla 
Authored: Fri Jun 15 15:38:44 2018 -0700
Committer: Rajeshbabu Chintaguntla 
Committed: Fri Jun 15 15:38:44 2018 -0700

--
 .../phoenix/coprocessor/GroupedAggregateRegionObserver.java  | 8 
 1 file changed, 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6acdae0f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 86ab275..aefe916 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -534,14 +534,6 @@ public class GroupedAggregateRegionObserver extends 
BaseScannerRegionObserver {
 currentKey.getLength(), SINGLE_COLUMN_FAMILY, 
SINGLE_COLUMN,
 AGG_TIMESTAMP, value, 0, value.length);
 results.add(keyValue);
-if (logger.isInfoEnabled()) {
-logger.info(LogUtil.addCustomAnnotations("Adding new 
aggregate row: "
-+ keyValue
-+ ",for current key "
-+ Bytes.toStringBinary(currentKey.get(), 
currentKey.getOffset(),
-currentKey.getLength()) + ", aggregated 
values: "
-+ Arrays.asList(rowAggregators), 
ScanUtil.getCustomAnnotations(scan)));
-}
 // If we're at an aggregation boundary, reset the
 // aggregators and
 // aggregate with the current result (which is not a part 
of



phoenix git commit: PHOENIX-4528 PhoenixAccessController checks permissions only at table level when creating views(Karan Mehta)

2018-06-15 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 16fa7f661 -> 61affd431


PHOENIX-4528 PhoenixAccessController checks permissions only at table level 
when creating views(Karan Mehta)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/61affd43
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/61affd43
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/61affd43

Branch: refs/heads/5.x-HBase-2.0
Commit: 61affd431b8c4a1730804f0c0d5a0035b797e178
Parents: 16fa7f6
Author: Rajeshbabu Chintaguntla 
Authored: Fri Jun 15 10:38:05 2018 -0700
Committer: Rajeshbabu Chintaguntla 
Committed: Fri Jun 15 10:38:05 2018 -0700

--
 .../phoenix/end2end/BasePermissionsIT.java  |  4 +
 .../phoenix/end2end/ChangePermissionsIT.java| 26 +-
 .../coprocessor/PhoenixAccessController.java| 95 +---
 3 files changed, 92 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/61affd43/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index 9f91267..7698fca 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -748,6 +748,10 @@ public class BasePermissionsIT extends BaseTest {
 }
 }
 
+String surroundWithDoubleQuotes(String input) {
+return "\"" + input + "\"";
+}
+
 void validateAccessDeniedException(AccessDeniedException ade) {
 String msg = ade.getMessage();
 assertTrue("Exception contained unexpected message: '" + msg + "'",

http://git-wip-us.apache.org/repos/asf/phoenix/blob/61affd43/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java
index 0d764d8..106438f 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -144,7 +145,7 @@ public class ChangePermissionsIT extends BasePermissionsIT {
 verifyAllowed(createSchema(SCHEMA_NAME), superUser1);
 verifyAllowed(grantPermissions("C", regularUser1, SCHEMA_NAME, 
true), superUser1);
 } else {
-verifyAllowed(grantPermissions("C", regularUser1, "\"" + 
QueryConstants.HBASE_DEFAULT_SCHEMA_NAME + "\"", true), superUser1);
+verifyAllowed(grantPermissions("C", regularUser1, 
surroundWithDoubleQuotes(QueryConstants.HBASE_DEFAULT_SCHEMA_NAME), true), 
superUser1);
 }
 
 // Create new table. Create indexes, views and view indexes on top of 
it. Verify the contents by querying it
@@ -235,7 +236,7 @@ public class ChangePermissionsIT extends BasePermissionsIT {
 verifyAllowed(createSchema(SCHEMA_NAME), superUser1);
 verifyAllowed(grantPermissions("C", regularUser1, SCHEMA_NAME, 
true), superUser1);
 } else {
-verifyAllowed(grantPermissions("C", regularUser1, "\"" + 
QueryConstants.HBASE_DEFAULT_SCHEMA_NAME + "\"", true), superUser1);
+verifyAllowed(grantPermissions("C", regularUser1, 
surroundWithDoubleQuotes(QueryConstants.HBASE_DEFAULT_SCHEMA_NAME), true), 
superUser1);
 }
 
 // Create MultiTenant Table (View Index Table should be automatically 
created)
@@ -266,4 +267,25 @@ public class ChangePermissionsIT extends BasePermissionsIT 
{
 verifyAllowed(readMultiTenantTableWithIndex(VIEW1_TABLE_NAME, "o1"), 
regularUser2);
 verifyAllowed(readMultiTenantTableWithoutIndex(VIEW2_TABLE_NAME, 
"o2"), regularUser2);
 }
+
+/**
+ * Grant RX permissions on the schema to regularUser1,
+ * Creating view on a table with that schema by regularUser1 should be 
allowed
+ */
+@Test
+

svn commit: r27420 - /dev/phoenix/KEYS

2018-06-13 Thread rajeshbabu
Author: rajeshbabu
Date: Wed Jun 13 08:39:04 2018
New Revision: 27420

Log:
Updating KEYS with rajeshb...@apache.org signing key

Modified:
dev/phoenix/KEYS

Modified: dev/phoenix/KEYS
==
--- dev/phoenix/KEYS (original)
+++ dev/phoenix/KEYS Wed Jun 13 08:39:04 2018
@@ -531,3 +531,61 @@ ym3HhetHFTWYHa7PR4f8VqAw4h578hAsNcsgj10f
 IAHw3/o2ABjphvjf9E8frQ==
 =gKGe
 -END PGP PUBLIC KEY BLOCK-
+pub   4096R/AAEDBD7B 2018-06-11
+uid  Rajeshbabu Chintaguntla (CODE SIGNING KEY) 

+sig 3AAEDBD7B 2018-06-11  Rajeshbabu Chintaguntla (CODE SIGNING KEY) 

+sub   4096R/2A1817DB 2018-06-11
+sig  AAEDBD7B 2018-06-11  Rajeshbabu Chintaguntla (CODE SIGNING KEY) 

+
+-BEGIN PGP PUBLIC KEY BLOCK-
+Version: GnuPG v1
+
+mQINBFseLj8BEADgUf2qoeYAE3MzcxEDysTm0fo/qtHJXTvZexVi+w/Xg86yvSuw
+WzChpYY8Z04cY+ruXRmwfvsfH//YlquJr02uHavojeqKsOW2kV03czh16ktNWwtS
+v7OIU3RzCFt67D2wtMJzbiA9l1WJyzvOx7xnV+uovUNsURYc42YEnwgMJPUQUz0D
+4x8SBKBmL31DELZ72PdgsMSNs7xBrvsXqybDlWkFFkTZWQ6ZwGE6L22ddmOT4N8e
+0FMxIJRrCPn7xMKF2xUEE05sKw+lxLbCO38gyc42AeEVhP+qCvG3E1ZhvqNe/l1y
+LHm4vBNxmR9bgXHf2BvmSBKdGeI2oHM7BQzZ071nH3PHnwr3ksTvNpqW9FkgzjEp
+02fv/n9iANp4ZYCphOdm1Ea9iGKvn0M44seoktT2Gh0Eof3hNOsHJcOCmU494w4I
+77zeCFHbLEVpCMNDLzHccmBBD5FNoOQRQouTuy8aWXsZeRUQrzLw3fF5KcxEtTaI
+SzYKmbGJayeYDpLDvZcU7KZyKm3X+bHWWXlU4sG37hCDfEcQjSwJBXyDmWMpqw7R
+FC6pFHCjIXofoNFdY6nMOeQ89B8hSCzgBb5Dh8hxwsGIjSyNF63reJ64EAKAi69s
+EAmWx1f3ivi54ZSHhb2mQ/vfdy84xUk8RwYF6WBNmR3I+DRQNKpKDvUxSwARAQAB
+tEJSYWplc2hiYWJ1IENoaW50YWd1bnRsYSAoQ09ERSBTSUdOSU5HIEtFWSkgPHJh
+amVzaGJhYnVAYXBhY2hlLm9yZz6JAjgEEwECACIFAlseLj8CGwMGCwkIBwMCBhUI
+AgkKCwQWAgMBAh4BAheAAAoJEDGP2Guq7b1774QQAN2//IV3qQNGoGU2Ihvo3F3n
+6YkW8y9qy3+Gz17mNhcBgtxZitf8xdch5JqDh/TU/LpV4z9guxnM3dNuwvZ3tbIw
+qog0UTDGoeCe4YokRI0kl19a1rn575PQyYu6TEEJrgTKA/yzz8Ck2UbEJ6gJMkxj
+DW5EXFm0QN5Eg+NMrsxcDuf+9hFJ1LagFXZdhPfI9+XzuDDXgZaTXxhd8QQu0d/d
+dcrEc1Pnk65CdDHHezdtMvm0WTw+staTO6/bF6QonM+qgpsZaPxtnU3/rMamGT5k
+rRG+STpLDdz4EUkk5+ZMSJei7glxv4Nx+16yi2YoAtRgoxyjPKBG8v6GCSWXu2K+
+LYoUZmQsnUGgvH2E8V/7XAh68H4/YSTnqhn8TEq5771aya9PzhhvHipSHkWxIkR2
+UjmnJjTkBCXkM4ju4hc/QHFa2J/tvzcAx0WRCUqioE6i/JI72C+nmgRZ4TUYy/nS
+tpAH4MSlWCwEWHEKDs85rKIDTL8qAi4X4HAreSsZp8+igYsr0zkhLjAhaJB6qMpB
+oek0Ke7O1L4m5AEzMayJzMdqdSKK4X3rzW8Qu7Wc1e+oN3BXEWfdGOYB3Mrssfhp
+ZvJEHrTwuMBtb2bAdoiCG5Q/i6mozfzfYaGHd5i7J2ujDdC9Qie/lRyQlwryy1gb
+hxGRAScz2xfMa/XNCawCuQINBFseLj8BEAChyrxmBZsR/gKi0Wj6TaLGENMf2mT7
+hiVTnKYU0yAgA1LsDGMVyF+dzwNgLL600LLqrqFMTVqrOiEF3hKLAQ3cjxSE7rwg
+0X02ZCOX69Y3+7/xOAMUT93Aqk1WZYUN28uCGO+6Y7Q4oT/V8OZbixSzaIAJ3jyI
+jGElDaw+VMYydMXU7Z9c4rhIjjEE3AkKtlB5KGAKHTUYi8GiJhqrmy02jhtHSXT/
+AwAIIxT8gZ5Rgx4MErIjGn7fp+fvbMYK0FbcbreO7flyls6dHwWAGcI5VbxWCCZu
+5YZlpo8z/iYkGLB2QV/gtzp7Wm4lZkMX61j+PTpzNO7I6rRQckhyeFF1ZcPK73ey
+EmjwWBmo7K/iMPWCYFrU7/ybdXf+TDzuyUYosJUSqG8cIERHBIQc5E4TsdNd3hJR
+TPNF9YzXv/iJK9PDO962zLS3cP373/QCiU+Q8rAhqfrX9Mygobsvc1PopPVn66hc
+BzwUUuLUY/5du2/hh8BC99BIa3BJ/wxCN7kc1UGdyyffoBB5gcnOC14r7SbJSVNU
+ymw1y8UXC2XQQyKFixUCsJfY94ZwqO7cmPxsWBc+DUtL7AP69ZkzYFmPlIwh0f+G
+l8/m4ai+tWnxIUhMfyvwyo2E4AJ+AGCfZwUdxB+uCD7AbTy/Sef7vv0zXGTk7brb
+am8myY/u0xaF8QARAQABiQIfBBgBAgAJBQJbHi4/AhsMAAoJEDGP2Guq7b17EZMQ
+AJAWU7hFSZP1tlwDziRN2FSSd+jHV59oBxzC1MONhT9c0VneodzER/NaK+6N2H7+
+CpQRV6ePzG3iUPj2Wtz8U/eX42Ia8OwnmLM0voJH529ZLPv5tVxqMV+UgbssDMWu
+nTeLw/pg4Vy8bec0HHPMwsmooQJQXu+e9/5DoRrepaGMFXu+BpKOQDN8Gvlq0BEA
+v9ojEbhRlxflV11VBq57bj1d+5F+JOKexqJGsbZvnY86j7hAY9BIKUKaA4HmhKX2
+vRjM6izxVA0ivJM5X/3qHMakjARrUFnbQkfh+dWQo5NZfQMp5m6uhQwwfhGIYhII
+iqk0+zhbgix9DfK7muLdEvE4TxAQO4sc8s/EtEmfHFWVtZA0mf+6dqU9WrigMEq3
+yA1KmQYUjXR+/iax60nd5SacdFKyehhFSAE7dC8Sqp/wT+fkgcYzUEzkiMVqx53n
+PXfiDPR3VzC0FwBLgXyJLf3B9lGuICia1wcNc4oKiA68kxc3EPbcYxLtVMKRTvBf
+dxN6Siuv6yJzuGJaj/C1fPGVwzbt/x0L7CQw8W5k3XzZlp7bCYyd7AXNZZOvAEQ9
+mE6f5bevE4Bock2Ee72bg391F7sf/bFXAouOiUgyoVHmW2ZSMfLWx6fP8Dq2AmAM
+hP9muXjqvjO7SxxLReObjt/gs3HtjFUnJzTX/TlAaMq9
+=rPAq
+-END PGP PUBLIC KEY BLOCK-




svn commit: r27419 - in /dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0: ./ bin/ src/

2018-06-13 Thread rajeshbabu
Author: rajeshbabu
Date: Wed Jun 13 08:36:58 2018
New Revision: 27419

Log:
Adding binary and source of 5.0.0 rc0

Added:
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz
   (with props)

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz
   (with props)

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.sha256

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.sha512

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc
 Wed Jun 13 08:36:58 2018
@@ -0,0 +1,11 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v2
+
+iQEcBAABCAAGBQJbINJfAAoJEO2Bo2zjpl28SLsH/jV3xZWY6caHEcg6AYXo21sL
++pC3h4k9lK449bCJwNQSNrabi6KinTRGcTNXGDwOGqp/y1VzZLl9XKSQUXyppPqY
+D30ODpRcbuKSPFW7fxc+KQQDBYI16zbphc2u1AsNJIIGxIYHU257tQfVxxW6RR/1
+sDllD6WuTlGIdLqhfD5qhKkG+l6r2jDv6ZucQ0Z4GSMQa45Jm3u7m3uwqdGMpkAB
+xC1YirY7EsKM3ei1tqBb9GMv5ZujEfNyaTvD5rfHa3BCgJIuY42fSmcmNN2ZRmEq
+/tyrOAIU+OXIimd9o3/4672uwcghCzv5z3dgpK9Oi3ApJFY43L4UNDLJ3triJmg=
+=i3s9
+-END PGP SIGNATURE-

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256
 Wed Jun 13 08:36:58 2018
@@ -0,0 +1 @@
+SHA256(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 
8d6a82196dd9a03dc3f0657daf1c57ea2fd34fa7f2586436bcb84abb5c1212d3

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
 Wed Jun 13 08:36:58 2018
@@ -0,0 +1 @@
+SHA512(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 
3ff6f5f6757362e768491c8d97fb1bc1c661f6b9147495a6167ed9a6050afe09f0b746df5a005991359a0f0bb25c69adb966123158a8671e27f157e31ab59f09

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc
 Wed Jun 13 08:36:58 2018
@@ -0,0 +1,11 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v2
+
+iQEcBAABCAAGBQJbINJqAAoJEO2Bo2zjpl28UjMH/3tnlH23N2e9M5eb1F9lY+Ml
+SGYO8ZawDSIhxnldF8/6VAkWWolp/Kx1o6tXAZ05Idce6zvZT3VG3QZbXfbnOBxt
+XZgKUzWzbqtpHVWL0dIK+S04D/0mgDtprgCdDpo8Gs8KULd9jV7f8qBvM0AShjzt
+pKNO1JqJ7hAoFVtuRmkPa0k7FUZ4STXTMKkvdkCxMPeGNcWDt29oA8WS4hD61u+z
+U7OQD8pJ9t8JwQMRZGTr1thQEbD72u1R/Grk4HjHMam+V8cuC2cp+cMr4fsbCZX7
+f2J1spBk/DYSV+R5plx2im

svn commit: r27418 - /dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/

2018-06-13 Thread rajeshbabu
Author: rajeshbabu
Date: Wed Jun 13 08:21:08 2018
New Revision: 27418

Log:
Deleting 5.0.0 rc0

Removed:
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/



[phoenix] Git Push Summary

2018-06-13 Thread rajeshbabu
Repository: phoenix
Updated Tags:  refs/tags/v5.0.0-HBase-2.0-rc0 [created] eb19c9017


[phoenix] Git Push Summary

2018-06-13 Thread rajeshbabu
Repository: phoenix
Updated Tags:  refs/tags/v5.0.0-HBase-2.0-rc0 [deleted] 555bada4e


svn commit: r27384 - in /dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0: ./ bin/ src/

2018-06-11 Thread rajeshbabu
Author: rajeshbabu
Date: Mon Jun 11 21:09:01 2018
New Revision: 27384

Log:
Adding binary and source of 5.0.0 rc0

Added:
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz
   (with props)

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.md5

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz
   (with props)

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.md5

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.sha256

dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.sha512

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.asc
 Mon Jun 11 21:09:01 2018
@@ -0,0 +1,11 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v2
+
+iQEcBAABCAAGBQJbHrawAAoJEO2Bo2zjpl28qYUIALTHO8qINC6jKVVWgqIw2XAR
+Ss2xRzRFd4/aCyCfmI/jGwT78QCnMDjNUaQYEknZfa4xVRK+YBTUesEVCrOdOOkG
+FGrU9fjIrielTR4sCVAMxks7CiKH0K0uOgRXzgLNHBYTddgy1eqnLEHmaBLC6T42
+z5FmsZpH1jnF8FeX64ofqeM5V35OKJaVWRQTIirCiYBYAONW4CjSDWX5VfWets93
+XaBgY/m9CvrI2vQTcOBjgMnObrvVmt1TWXNczPRosi0TMHZ9kBfNT5/Pq3n1dgzl
+QESr2OmvFjpckW+shKtcQf5OMhDOzHGhetIch0ER8FucZlGv88OZXuhVP4gY7zo=
+=p5TV
+-END PGP SIGNATURE-

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.md5
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.md5
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.md5
 Mon Jun 11 21:09:01 2018
@@ -0,0 +1 @@
+MD5(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 
2e9a64512ae3022eee0ec7e8215e9afb

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha256
 Mon Jun 11 21:09:01 2018
@@ -0,0 +1 @@
+SHA256(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 
254d9067867803d779a3564f6d520e1d04bb2da4acba99ceaa9f5d95c35a10ee

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
==
--- 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
 (added)
+++ 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/bin/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz.sha512
 Mon Jun 11 21:09:01 2018
@@ -0,0 +1 @@
+SHA512(apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz)= 
ac10d64016ac839fb82747843c5b098303b99830551da08d1345508f9026b2a55faeae9894e8a7b882e9f6d86446cc95e41c72177c2b2e34e2d9e3b2ea229163

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: 
dev/phoenix/apache-phoenix-5.0.0-HBase-2.0-rc0/src/apache-phoenix-5.0.0-HBase-2.0-src.tar.gz.asc

[phoenix] Git Push Summary

2018-06-11 Thread rajeshbabu
Repository: phoenix
Updated Tags:  refs/tags/v5.0.0-HBase-2.0-rc0 [created] 555bada4e


phoenix git commit: PHOENIX-4778 Fix rat:check failure on 5.x branch(Rajeshbabu)

2018-06-11 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 abcf0d1ab -> 7ecf47443


PHOENIX-4778 Fix rat:check failure on 5.x branch(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7ecf4744
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7ecf4744
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7ecf4744

Branch: refs/heads/5.x-HBase-2.0
Commit: 7ecf47443b5dcc4c6993463e20f9a13ea8564cb0
Parents: abcf0d1
Author: Rajeshbabu Chintaguntla 
Authored: Mon Jun 11 12:00:37 2018 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Mon Jun 11 12:00:37 2018 +0530

--
 .../apache/phoenix/end2end/MutationStateIT.java| 17 +
 .../end2end/index/MutableIndexRebuilderIT.java | 17 +
 2 files changed, 34 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7ecf4744/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
index 2d5f360..36782c1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertEquals;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7ecf4744/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
index a29766f..e1c8f81 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexRebuilderIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.end2end.index;
 
 import static org.junit.Assert.assertEquals;



phoenix git commit: PHOENIX-4671 Fix minor size accounting bug for MutationSize(Lars Hofhansl)

2018-05-24 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 09bd6983c -> 1b2c95094


PHOENIX-4671 Fix minor size accounting bug for MutationSize(Lars Hofhansl)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1b2c9509
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1b2c9509
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1b2c9509

Branch: refs/heads/5.x-HBase-2.0
Commit: 1b2c95094701bf62e1f646ba33d070aba0ac3d94
Parents: 09bd698
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Thu May 24 16:33:15 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Thu May 24 16:33:15 2018 +0530

--
 .../end2end/UpsertSelectAutoCommitIT.java   | 28 
 .../apache/phoenix/execute/MutationState.java   |  1 +
 2 files changed, 29 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1b2c9509/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
index 6b781a0..38d48d6 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
@@ -23,15 +23,19 @@ import static 
org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
 
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -173,4 +177,28 @@ public class UpsertSelectAutoCommitIT extends 
ParallelStatsDisabledIT {
 conn.close();
 }
 
+@Test
+public void testMaxMutationSize() throws Exception {
+Properties connectionProperties = new Properties();
+
connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3");
+
connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, 
"5");
+PhoenixConnection connection =
+(PhoenixConnection) DriverManager.getConnection(getUrl(), 
connectionProperties);
+connection.setAutoCommit(true);
+String fullTableName = generateUniqueName();
+try (Statement stmt = connection.createStatement()) {
+stmt.execute(
+"CREATE TABLE " + fullTableName + " (pk INTEGER PRIMARY 
KEY, v1 INTEGER, v2 INTEGER)");
+stmt.execute(
+"CREATE SEQUENCE " + fullTableName + "_seq cache 1000");
+stmt.execute("UPSERT INTO " + fullTableName + " VALUES (NEXT VALUE 
FOR " + fullTableName + "_seq, rand(), rand())");
+}
+try (Statement stmt = connection.createStatement()) {
+for (int i=0; i<16; i++) {
+stmt.execute("UPSERT INTO " + fullTableName + " SELECT NEXT 
VALUE FOR " + fullTableName + "_seq, rand(), rand() FROM " + fullTableName);
+}
+}
+connection.close();
+}
+
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1b2c9509/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 1d95f08..1d662ab 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -1581,6 +1581,7 @@ public class MutationState implements SQLCloseable {
 
 public void clear(){
 rowKeyToRowMutationState.clear();
+estimatedSize = 0;
 }
 
 public Collection values() {
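The one-line MutationState change above makes clear() reset the estimated mutation size together with the row map, so the byte estimate used by the max-mutation-size checks (exercised by the new testMaxMutationSize test) stays consistent with the mutations that are actually pending. A standalone sketch of the same bookkeeping rule, with hypothetical names rather than the Phoenix classes:

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: a size estimate maintained alongside a collection
// must be reset wherever the collection itself is cleared.
class PendingMutations {
    private final Map<String, byte[]> rows = new HashMap<>();
    private long estimatedSize = 0;

    void add(String rowKey, byte[] value) {
        rows.put(rowKey, value);
        estimatedSize += value.length;
    }

    long estimatedSize() {
        return estimatedSize;
    }

    void clear() {
        rows.clear();
        estimatedSize = 0; // omitting this reset is the bug being fixed above
    }
}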



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-11 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 ded15dc42 -> e05f2bcea


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e05f2bce
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e05f2bce
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e05f2bce

Branch: refs/heads/5.x-HBase-2.0
Commit: e05f2bceae498573f124d12e5c570af9ca02cf7e
Parents: ded15dc
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Sat May 12 03:00:49 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Sat May 12 03:00:49 2018 +0530

--
 .../src/main/java/org/apache/phoenix/util/ServerUtil.java| 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e05f2bce/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index cccf1c9..d34514d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -37,9 +37,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -366,7 +364,7 @@ public class ServerUtil {
 }
 
 public static Configuration 
getIndexWriterConfigurationWithCustomThreads(Configuration conf) {
-Configuration clonedConfig = PropertiesUtil.cloneConfig(conf);
+Configuration clonedConfig = getIndexWriterConnection(conf);
 setHTableThreads(clonedConfig);
 return clonedConfig;
 }



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-11 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 faacf0469 -> 829f3fcc8


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/829f3fcc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/829f3fcc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/829f3fcc

Branch: refs/heads/4.x-HBase-0.98
Commit: 829f3fcc82c6a00910db5b21b85aea90f9f0afbf
Parents: faacf04
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Fri May 11 23:39:31 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Fri May 11 23:39:31 2018 +0530

--
 .../src/main/java/org/apache/phoenix/util/ServerUtil.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/829f3fcc/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 106adb1..fe27ab4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -351,8 +351,8 @@ public class ServerUtil {
 HConnection connection = null;
 if((connection = connections.get(connectionType)) == null) {
 synchronized (CoprocessorHConnectionTableFactory.class) {
-if(connections.get(connectionType) == null) {
-connection = new CoprocessorHConnection(conf, server);
+if((connection = connections.get(connectionType)) == null) 
{
+connection = new 
CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), 
server);
 connections.put(connectionType, connection);
 return connection;
 }
@@ -410,7 +410,7 @@ public class ServerUtil {
 }
 
 public static Configuration 
getIndexWriterConfigurationWithCustomThreads(Configuration conf) {
-Configuration clonedConfig = PropertiesUtil.cloneConfig(conf);
+Configuration clonedConfig = getIndexWriterConnection(conf);
 setHTableThreads(clonedConfig);
 return clonedConfig;
 }



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-11 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 e6119229c -> c63d563c1


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c63d563c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c63d563c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c63d563c

Branch: refs/heads/4.x-HBase-1.1
Commit: c63d563c1b7bb7affbfcd0e81ea23c4a436bf396
Parents: e611922
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Fri May 11 23:23:03 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Fri May 11 23:23:03 2018 +0530

--
 .../src/main/java/org/apache/phoenix/util/ServerUtil.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c63d563c/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 5d2a94c..66bfca3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -346,8 +346,8 @@ public class ServerUtil {
 ClusterConnection connection = null;
 if((connection = connections.get(connectionType)) == null) {
 synchronized (CoprocessorHConnectionTableFactory.class) {
-if(connections.get(connectionType) == null) {
-connection = new CoprocessorHConnection(conf, server);
+if((connection = connections.get(connectionType)) == null) 
{
+connection = new 
CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), 
server);
 connections.put(connectionType, connection);
 return connection;
 }
@@ -405,7 +405,7 @@ public class ServerUtil {
 }
 
 public static Configuration 
getIndexWriterConfigurationWithCustomThreads(Configuration conf) {
-Configuration clonedConfig = PropertiesUtil.cloneConfig(conf);
+Configuration clonedConfig = getIndexWriterConnection(conf);
 setHTableThreads(clonedConfig);
 return clonedConfig;
 }



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-11 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 141686ab5 -> 64b77450a


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/64b77450
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/64b77450
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/64b77450

Branch: refs/heads/4.x-HBase-1.2
Commit: 64b77450a8ca45686f7e7d0abf14231dccda2d32
Parents: 141686a
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Fri May 11 23:21:55 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Fri May 11 23:21:55 2018 +0530

--
 .../src/main/java/org/apache/phoenix/util/ServerUtil.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/64b77450/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 9190373..891839a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -346,8 +346,8 @@ public class ServerUtil {
 ClusterConnection connection = null;
 if((connection = connections.get(connectionType)) == null) {
 synchronized (CoprocessorHConnectionTableFactory.class) {
-if(connections.get(connectionType) == null) {
-connection = new CoprocessorHConnection(conf, server);
+if((connection = connections.get(connectionType)) == null) 
{
+connection = new 
CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), 
server);
 connections.put(connectionType, connection);
 return connection;
 }
@@ -405,7 +405,7 @@ public class ServerUtil {
 }
 
 public static Configuration 
getIndexWriterConfigurationWithCustomThreads(Configuration conf) {
-Configuration clonedConfig = PropertiesUtil.cloneConfig(conf);
+Configuration clonedConfig = getIndexWriterConnection(conf);
 setHTableThreads(clonedConfig);
 return clonedConfig;
 }



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-11 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 72fa8749e -> 39b92bf9e


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/39b92bf9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/39b92bf9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/39b92bf9

Branch: refs/heads/4.x-HBase-1.3
Commit: 39b92bf9e8d9cae46b1fa230d91ac04a8e49e629
Parents: 72fa874
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Fri May 11 23:20:52 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Fri May 11 23:20:52 2018 +0530

--
 .../src/main/java/org/apache/phoenix/util/ServerUtil.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/39b92bf9/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 9190373..891839a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -346,8 +346,8 @@ public class ServerUtil {
 ClusterConnection connection = null;
 if((connection = connections.get(connectionType)) == null) {
 synchronized (CoprocessorHConnectionTableFactory.class) {
-if(connections.get(connectionType) == null) {
-connection = new CoprocessorHConnection(conf, server);
+if((connection = connections.get(connectionType)) == null) 
{
+connection = new 
CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), 
server);
 connections.put(connectionType, connection);
 return connection;
 }
@@ -405,7 +405,7 @@ public class ServerUtil {
 }
 
 public static Configuration 
getIndexWriterConfigurationWithCustomThreads(Configuration conf) {
-Configuration clonedConfig = PropertiesUtil.cloneConfig(conf);
+Configuration clonedConfig = getIndexWriterConnection(conf);
 setHTableThreads(clonedConfig);
 return clonedConfig;
 }



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-11 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/master 0c8349e3c -> 56f109603


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/56f10960
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/56f10960
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/56f10960

Branch: refs/heads/master
Commit: 56f109603cec93f3904366d4bb23415981947ae0
Parents: 0c8349e
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Fri May 11 23:19:21 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Fri May 11 23:19:21 2018 +0530

--
 .../src/main/java/org/apache/phoenix/util/ServerUtil.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/56f10960/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 9190373..891839a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -346,8 +346,8 @@ public class ServerUtil {
 ClusterConnection connection = null;
 if((connection = connections.get(connectionType)) == null) {
 synchronized (CoprocessorHConnectionTableFactory.class) {
-if(connections.get(connectionType) == null) {
-connection = new CoprocessorHConnection(conf, server);
+if((connection = connections.get(connectionType)) == null) 
{
+connection = new 
CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), 
server);
 connections.put(connectionType, connection);
 return connection;
 }
@@ -405,7 +405,7 @@ public class ServerUtil {
 }
 
 public static Configuration 
getIndexWriterConfigurationWithCustomThreads(Configuration conf) {
-Configuration clonedConfig = PropertiesUtil.cloneConfig(conf);
+Configuration clonedConfig = getIndexWriterConnection(conf);
 setHTableThreads(clonedConfig);
 return clonedConfig;
 }



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-09 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 9178f569c -> 6f52b737d


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6f52b737
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6f52b737
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6f52b737

Branch: refs/heads/4.x-HBase-1.1
Commit: 6f52b737d8902800692f62ded13dffa0355465c9
Parents: 9178f56
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Wed May 9 18:13:38 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Wed May 9 18:13:38 2018 +0530

--
 .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6f52b737/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 451bc52..5d2a94c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -66,8 +67,6 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
-import org.jboss.netty.util.internal.ConcurrentHashMap;
-
 
 @SuppressWarnings("deprecation")
 public class ServerUtil {



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-09 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 d38822ed7 -> 7e5c3871d


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7e5c3871
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7e5c3871
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7e5c3871

Branch: refs/heads/4.x-HBase-1.3
Commit: 7e5c3871dc55e00d800c8a2cf17f44d45cb8a194
Parents: d38822e
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Wed May 9 18:13:13 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Wed May 9 18:13:13 2018 +0530

--
 .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7e5c3871/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index d80b6fc..9190373 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -66,8 +67,6 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
-import org.jboss.netty.util.internal.ConcurrentHashMap;
-
 
 @SuppressWarnings("deprecation")
 public class ServerUtil {



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-09 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/master ea06a10a3 -> 637cedbd4


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/637cedbd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/637cedbd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/637cedbd

Branch: refs/heads/master
Commit: 637cedbd40b4eacb2189fc3d88b49671dce44eca
Parents: ea06a10
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Wed May 9 18:12:42 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Wed May 9 18:12:42 2018 +0530

--
 .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/637cedbd/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index d80b6fc..9190373 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -66,8 +67,6 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
-import org.jboss.netty.util.internal.ConcurrentHashMap;
-
 
 @SuppressWarnings("deprecation")
 public class ServerUtil {



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)

2018-05-09 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 292ec36a3 -> f186c3080


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f186c308
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f186c308
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f186c308

Branch: refs/heads/4.x-HBase-1.2
Commit: f186c3080fef2bbbf4ef6c22028daf6c976c55f4
Parents: 292ec36
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Wed May 9 18:11:29 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Wed May 9 18:11:29 2018 +0530

--
 .../src/main/java/org/apache/phoenix/util/ServerUtil.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f186c308/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index d80b6fc..9190373 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -26,6 +26,7 @@ import java.io.IOException;
 import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -66,8 +67,6 @@ import org.apache.phoenix.hbase.index.write.IndexWriterUtils;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
-import org.jboss.netty.util.internal.ConcurrentHashMap;
-
 
 @SuppressWarnings("deprecation")
 public class ServerUtil {



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-08 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 4082c73ee -> d38822ed7


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d38822ed
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d38822ed
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d38822ed

Branch: refs/heads/4.x-HBase-1.3
Commit: d38822ed7cc6a39578a2423556a036c5d48d8540
Parents: 4082c73
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Tue May 8 20:50:25 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Tue May 8 20:50:25 2018 +0530

--
 phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d38822ed/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 2dab076..d80b6fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -27,7 +27,6 @@ import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
-import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-08 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 b792c06c5 -> 292ec36a3


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/292ec36a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/292ec36a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/292ec36a

Branch: refs/heads/4.x-HBase-1.2
Commit: 292ec36a3d8e854e6b0375797cf478e241576aeb
Parents: b792c06
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Tue May 8 20:49:43 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Tue May 8 20:49:43 2018 +0530

--
 phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/292ec36a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 2dab076..d80b6fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -27,7 +27,6 @@ import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
-import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-08 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 35d202019 -> 9178f569c


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9178f569
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9178f569
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9178f569

Branch: refs/heads/4.x-HBase-1.1
Commit: 9178f569ca50a3ec90fffd5fa8de46cda9f5cafc
Parents: 35d2020
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Tue May 8 20:37:50 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Tue May 8 20:37:50 2018 +0530

--
 phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9178f569/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 5097245..451bc52 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -27,7 +27,6 @@ import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
-import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers-addendum(Rajeshbabu)

2018-05-08 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/master 0675fe545 -> ea06a10a3


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers-addendum(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ea06a10a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ea06a10a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ea06a10a

Branch: refs/heads/master
Commit: ea06a10a35af8eb0773b077cbbe704275bb7869c
Parents: 0675fe5
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Tue May 8 20:32:04 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Tue May 8 20:32:04 2018 +0530

--
 .../java/org/apache/phoenix/util/ServerUtil.java  | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ea06a10a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 1043fd2..d80b6fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -27,7 +27,6 @@ import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
-import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -345,16 +344,17 @@ public class ServerUtil {
 new ConcurrentHashMap<ConnectionType, ClusterConnection>();
 
 public static ClusterConnection getConnection(final ConnectionType 
connectionType, final Configuration conf, final HRegionServer server) throws 
IOException {
-return connections.computeIfAbsent(connectionType, new 
Function<ConnectionType, ClusterConnection>() {
-@Override
-public ClusterConnection apply(ConnectionType t) {
-try {
-return new 
CoprocessorHConnection(getTypeSpecificConfiguration(connectionType, conf), 
server);
-} catch (IOException e) {
-   throw new RuntimeException(e);
+ClusterConnection connection = null;
+if((connection = connections.get(connectionType)) == null) {
+synchronized (CoprocessorHConnectionTableFactory.class) {
+if(connections.get(connectionType) == null) {
+connection = new CoprocessorHConnection(conf, server);
+connections.put(connectionType, connection);
+return connection;
 }
 }
-});
+}
+return connection;
 }
 
 public static Configuration 
getTypeSpecificConfiguration(ConnectionType connectionType, Configuration conf) 
{
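
The earlier version replaced by this commit cached connections through ConcurrentHashMap.computeIfAbsent, which forces the checked IOException thrown by the connection factory to be smuggled out of the Function as a RuntimeException, exactly as the removed lines show. Below is a hedged sketch of one way a computeIfAbsent-based cache can still surface the checked exception to callers; Connection and openConnection are illustrative stand-ins, not Phoenix or HBase APIs.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.ConcurrentHashMap;

public class ComputeIfAbsentSketch {

    // Illustrative stand-in for the cached connection type.
    interface Connection { }

    private static final ConcurrentHashMap<String, Connection> CACHE =
            new ConcurrentHashMap<String, Connection>();

    static Connection get(String key) throws IOException {
        try {
            // computeIfAbsent runs the factory at most once per key,
            // but its Function cannot throw a checked exception directly.
            return CACHE.computeIfAbsent(key, k -> {
                try {
                    return openConnection(k);
                } catch (IOException e) {
                    throw new UncheckedIOException(e); // wrap to escape the Function
                }
            });
        } catch (UncheckedIOException e) {
            throw e.getCause(); // unwrap so callers still see the checked IOException
        }
    }

    // Placeholder factory; the real code constructs a CoprocessorHConnection.
    private static Connection openConnection(String key) throws IOException {
        return new Connection() { };
    }
}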



phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)

2018-05-08 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 3b45df999 -> b09f26b73


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b09f26b7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b09f26b7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b09f26b7

Branch: refs/heads/4.x-HBase-0.98
Commit: b09f26b73e839dbb1adcce4b295afbea63c7c309
Parents: 3b45df9
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Tue May 8 16:19:16 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Tue May 8 16:19:16 2018 +0530

--
 .../DelegateRegionCoprocessorEnvironment.java   |   7 +-
 .../UngroupedAggregateRegionObserver.java   |  12 +-
 .../org/apache/phoenix/hbase/index/Indexer.java |  19 +--
 .../hbase/index/write/IndexWriterUtils.java |  27 +---
 .../index/PhoenixTransactionalIndexer.java  |  18 +--
 .../org/apache/phoenix/util/ServerUtil.java | 139 ---
 6 files changed, 138 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b09f26b7/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
index a3a8bc3..9701f5e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.phoenix.hbase.index.table.HTableFactory;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.util.ServerUtil;
+import org.apache.phoenix.util.ServerUtil.ConnectionType;
 
 /**
  * Class to encapsulate {@link RegionCoprocessorEnvironment} for phoenix 
coprocessors. Often we
@@ -44,10 +45,10 @@ public class DelegateRegionCoprocessorEnvironment 
implements RegionCoprocessorEn
 private RegionCoprocessorEnvironment delegate;
 private HTableFactory tableFactory;
 
-public DelegateRegionCoprocessorEnvironment(Configuration config, 
RegionCoprocessorEnvironment delegate) {
-this.config = config;
+public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment 
delegate, ConnectionType connectionType) {
+this.config = 
ServerUtil.ConnectionFactory.getTypeSpecificConfiguration(connectionType, 
delegate.getConfiguration());
 this.delegate = delegate;
-this.tableFactory = ServerUtil.getDelegateHTableFactory(this, config);
+this.tableFactory = ServerUtil.getDelegateHTableFactory(this, 
connectionType);
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b09f26b7/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index f3c7679..e8658a6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -145,6 +145,7 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
+import org.apache.phoenix.util.ServerUtil.ConnectionType;
 import org.apache.phoenix.util.StringUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -226,14 +227,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 
upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
 InterRegionServerIndexRpcControllerFactory.class, 
RpcControllerFactory.class);
 
-compactionConfig = PropertiesUtil.cloneConfig(e.getConfiguration());
-// lower the number of rpc retries, so we don't hang the compaction
-compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-
e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER,
-QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER));
-compacti
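
The diff above (DelegateRegionCoprocessorEnvironment now taking a ConnectionType, and the compactionConfig block being removed from UngroupedAggregateRegionObserver) reflects the central idea of PHOENIX-4685: per-purpose configuration tweaks, such as lowering the client retry count so that a failing write cannot hang a compaction, move behind ServerUtil.ConnectionFactory.getTypeSpecificConfiguration and are keyed by connection type. A rough sketch of that idea follows, assuming an illustrative class name and a fixed retry value in place of the QueryServices settings the removed block read.

import org.apache.hadoop.conf.Configuration;

public class TypeSpecificConfigSketch {

    // Placeholder for ServerUtil.ConnectionType.
    enum ConnectionType { COMPACTION_CONNECTION, INDEX_WRITER_CONNECTION, DEFAULT_SERVER_CONNECTION }

    public static Configuration getTypeSpecificConfiguration(ConnectionType type, Configuration base) {
        if (type != ConnectionType.COMPACTION_CONNECTION) {
            // Assumption for this sketch: only the compaction connection needs overrides.
            return base;
        }
        // Copy the server configuration so the shared instance stays untouched.
        Configuration cloned = new Configuration(base);
        // Lower the number of client RPC retries so a stuck write cannot hang the compaction.
        // The block removed above derived this value from QueryServices.METADATA_WRITE_RETRIES_NUMBER;
        // 5 is only a placeholder to keep the sketch self-contained.
        cloned.setInt("hbase.client.retries.number", 5);
        return cloned;
    }
}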

phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)

2018-05-08 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 d0f98a020 -> 4082c73ee


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4082c73e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4082c73e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4082c73e

Branch: refs/heads/4.x-HBase-1.3
Commit: 4082c73ee23d901642d8c5bc45ececfcf5e50ede
Parents: d0f98a0
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Tue May 8 12:06:49 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Tue May 8 12:06:49 2018 +0530

--
 .../DelegateRegionCoprocessorEnvironment.java   |   7 +-
 .../UngroupedAggregateRegionObserver.java   |  14 +-
 .../org/apache/phoenix/hbase/index/Indexer.java |  19 +--
 .../hbase/index/write/IndexWriterUtils.java |  27 +---
 .../index/PhoenixTransactionalIndexer.java  |  18 +--
 .../org/apache/phoenix/util/ServerUtil.java | 141 ---
 6 files changed, 142 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4082c73e/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
index 284d53c..a791f4a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.phoenix.hbase.index.table.HTableFactory;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.util.ServerUtil;
+import org.apache.phoenix.util.ServerUtil.ConnectionType;
 
 /**
  * Class to encapsulate {@link RegionCoprocessorEnvironment} for phoenix 
coprocessors. Often we
@@ -44,10 +45,10 @@ public class DelegateRegionCoprocessorEnvironment 
implements RegionCoprocessorEn
 private RegionCoprocessorEnvironment delegate;
 private HTableFactory tableFactory;
 
-public DelegateRegionCoprocessorEnvironment(Configuration config, 
RegionCoprocessorEnvironment delegate) {
-this.config = config;
+public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment 
delegate, ConnectionType connectionType) {
+this.config = 
ServerUtil.ConnectionFactory.getTypeSpecificConfiguration(connectionType, 
delegate.getConfiguration());
 this.delegate = delegate;
-this.tableFactory = ServerUtil.getDelegateHTableFactory(this, config);
+this.tableFactory = ServerUtil.getDelegateHTableFactory(this, 
connectionType);
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4082c73e/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 6bee65c..14213f4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -144,6 +144,7 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
+import org.apache.phoenix.util.ServerUtil.ConnectionType;
 import org.apache.phoenix.util.StringUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -225,14 +226,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 
upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
 InterRegionServerIndexRpcControllerFactory.class, 
RpcControllerFactory.class);
 
-compactionConfig = PropertiesUtil.cloneConfig(e.getConfiguration());
-// lower the number of rpc retries, so we don't hang the compaction
-compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-
e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER,
-QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER));
-compacti

phoenix git commit: PHOENIX-4685 Properly handle connection caching for Phoenix inside RegionServers(Rajeshbabu)

2018-05-07 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.2 6d994b02b -> b792c06c5


PHOENIX-4685 Properly handle connection caching for Phoenix inside 
RegionServers(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b792c06c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b792c06c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b792c06c

Branch: refs/heads/4.x-HBase-1.2
Commit: b792c06c5a3805721b7fb5e635c5cbc2d30d0a12
Parents: 6d994b0
Author: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Authored: Tue May 8 09:27:16 2018 +0530
Committer: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
Committed: Tue May 8 09:27:16 2018 +0530

--
 .../DelegateRegionCoprocessorEnvironment.java   |   7 +-
 .../UngroupedAggregateRegionObserver.java   |  14 +-
 .../org/apache/phoenix/hbase/index/Indexer.java |  19 +--
 .../hbase/index/write/IndexWriterUtils.java |  27 +---
 .../index/PhoenixTransactionalIndexer.java  |  18 +--
 .../org/apache/phoenix/util/ServerUtil.java | 141 ---
 6 files changed, 142 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b792c06c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
index 284d53c..a791f4a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionCoprocessorEnvironment.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.phoenix.hbase.index.table.HTableFactory;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.util.ServerUtil;
+import org.apache.phoenix.util.ServerUtil.ConnectionType;
 
 /**
  * Class to encapsulate {@link RegionCoprocessorEnvironment} for phoenix 
coprocessors. Often we
@@ -44,10 +45,10 @@ public class DelegateRegionCoprocessorEnvironment 
implements RegionCoprocessorEn
 private RegionCoprocessorEnvironment delegate;
 private HTableFactory tableFactory;
 
-public DelegateRegionCoprocessorEnvironment(Configuration config, 
RegionCoprocessorEnvironment delegate) {
-this.config = config;
+public DelegateRegionCoprocessorEnvironment(RegionCoprocessorEnvironment 
delegate, ConnectionType connectionType) {
+this.config = 
ServerUtil.ConnectionFactory.getTypeSpecificConfiguration(connectionType, 
delegate.getConfiguration());
 this.delegate = delegate;
-this.tableFactory = ServerUtil.getDelegateHTableFactory(this, config);
+this.tableFactory = ServerUtil.getDelegateHTableFactory(this, 
connectionType);
 }
 
 @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b792c06c/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 6bee65c..14213f4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -144,6 +144,7 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
+import org.apache.phoenix.util.ServerUtil.ConnectionType;
 import org.apache.phoenix.util.StringUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -225,14 +226,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 
upsertSelectConfig.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
 InterRegionServerIndexRpcControllerFactory.class, 
RpcControllerFactory.class);
 
-compactionConfig = PropertiesUtil.cloneConfig(e.getConfiguration());
-// lower the number of rpc retries, so we don't hang the compaction
-compactionConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-
e.getConfiguration().getInt(QueryServices.METADATA_WRITE_RETRIES_NUMBER,
-QueryServicesOptions.DEFAULT_METADATA_WRITE_RETRIES_NUMBER));
-compacti
