[phoenix] branch master updated: PHOENIX-5403 Optimize metadata cache lookup of global tables using a tenant specific connection

2019-07-31 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 12c538c  PHOENIX-5403 Optimize metadata cache lookup of global tables 
using a tenant specific connection
12c538c is described below

commit 12c538ce2db4779b91bc294ebe8e4880673ff8c2
Author: Thomas D'Silva 
AuthorDate: Mon Jul 22 22:45:04 2019 -0700

PHOENIX-5403 Optimize metadata cache lookup of global tables using a tenant 
specific connection
---
 .../java/org/apache/phoenix/end2end/Array2IT.java  | 17 ++---
 .../org/apache/phoenix/compile/FromCompiler.java   |  6 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java |  7 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  | 74 --
 4 files changed, 39 insertions(+), 65 deletions(-)
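For readers skimming the diff below: the change is on the client-side metadata cache path, letting a tenant-specific connection satisfy a lookup of a global table from the cached global entry instead of going back to the server. A rough, illustrative sketch of that kind of fallback, assuming the PTableKey/PTableRef/PMetaData client APIs hinted at by the imports in the diff (this is not the committed code):

// Illustrative sketch only, not the committed change. Assumes the Phoenix client
// metadata cache (PMetaData) is keyed by PTableKey(tenantId, fullTableName).
// Imports assumed: org.apache.phoenix.schema.{PMetaData, PName, PTableKey, PTableRef, TableNotFoundException}
private PTableRef resolveFromCache(PMetaData cache, PName tenantId, String fullTableName)
        throws TableNotFoundException {
    try {
        // Tenant-specific entry first (e.g. a tenant view).
        return cache.getTableRef(new PTableKey(tenantId, fullTableName));
    } catch (TableNotFoundException e) {
        // Global tables are cached under a null tenant id; reuse that entry
        // rather than paying for another server round trip from the tenant connection.
        return cache.getTableRef(new PTableKey(null, fullTableName));
    }
}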

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
index 52bfb86..0cb60c2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
@@ -37,6 +37,7 @@ import org.apache.phoenix.schema.types.PhoenixArray;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
+import org.junit.Ignore;
 import org.junit.Test;
 
 public class Array2IT extends ArrayIT {
@@ -669,27 +670,19 @@ public class Array2IT extends ArrayIT {
 
 }
 
-@Test
+@Test // see PHOENIX-5416
+@Ignore
 public void testArrayRefToLiteral() throws Exception {
-Connection conn;
-
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement stmt = conn.prepareStatement("select ?[2] from \"SYSTEM\".\"catalog\" limit 1");
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+PreparedStatement stmt = conn.prepareStatement("select ?[2] from \"SYSTEM\".\"CATALOG\" limit 1");
 Array array = conn.createArrayOf("CHAR", new String[] {"a","b","c"});
 stmt.setArray(1, array);
 ResultSet rs = stmt.executeQuery();
 assertTrue(rs.next());
 assertEquals("b", rs.getString(1));
 assertFalse(rs.next());
-} catch (SQLException e) {
-} finally {
-if (conn != null) {
-conn.close();
-}
 }
-
 }
 
 @Test
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index ce0c3a1..a1ee0bf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -174,11 +174,7 @@ public class FromCompiler {
 NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.emptyList());
 // Always use non-tenant-specific connection here
 try {
-// We need to always get the latest meta data for the parent table of a create view call to ensure that
-// that we're copying the current table meta data as of when the view is created. Once we no longer
-// copy the parent meta data, but store only the local diffs (PHOENIX-3534), we will no longer need
-// to do this.
-SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true, true);
+SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true);
 return visitor;
 } catch (TableNotFoundException e) {
 // Used for mapped VIEW, since we won't be able to resolve that.
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index fc761dd..4112984 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -228,6 +228,7 @@ import org.apache.phoenix.schema.PSynchronizedMetaData;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableRef;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.ReadOnlyTableException;
 import org.apache.phoenix.schema.SaltingUtil;
@@ -271,6 +272,7 @@ import org.apache.phoenix.util

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5403 Optimize metadata cache lookup of global tables using a tenant specific connection

2019-07-31 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new b25dcce  PHOENIX-5403 Optimize metadata cache lookup of global tables 
using a tenant specific connection
b25dcce is described below

commit b25dcce13fdd2c31cceaf222e74c8380037640dc
Author: Thomas D'Silva 
AuthorDate: Mon Jul 22 22:45:04 2019 -0700

PHOENIX-5403 Optimize metadata cache lookup of global tables using a tenant 
specific connection
---
 .../java/org/apache/phoenix/end2end/Array2IT.java  | 17 ++---
 .../org/apache/phoenix/compile/FromCompiler.java   |  6 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java |  7 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  | 74 --
 4 files changed, 39 insertions(+), 65 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
index 52bfb86..0cb60c2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
@@ -37,6 +37,7 @@ import org.apache.phoenix.schema.types.PhoenixArray;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
+import org.junit.Ignore;
 import org.junit.Test;
 
 public class Array2IT extends ArrayIT {
@@ -669,27 +670,19 @@ public class Array2IT extends ArrayIT {
 
 }
 
-@Test
+@Test // see PHOENIX-5416
+@Ignore
 public void testArrayRefToLiteral() throws Exception {
-Connection conn;
-
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement stmt = conn.prepareStatement("select ?[2] from \"SYSTEM\".\"catalog\" limit 1");
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+PreparedStatement stmt = conn.prepareStatement("select ?[2] from \"SYSTEM\".\"CATALOG\" limit 1");
 Array array = conn.createArrayOf("CHAR", new String[] {"a","b","c"});
 stmt.setArray(1, array);
 ResultSet rs = stmt.executeQuery();
 assertTrue(rs.next());
 assertEquals("b", rs.getString(1));
 assertFalse(rs.next());
-} catch (SQLException e) {
-} finally {
-if (conn != null) {
-conn.close();
-}
 }
-
 }
 
 @Test
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 97ac0f6..0bdc748 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -174,11 +174,7 @@ public class FromCompiler {
 NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.emptyList());
 // Always use non-tenant-specific connection here
 try {
-// We need to always get the latest meta data for the parent table of a create view call to ensure that
-// that we're copying the current table meta data as of when the view is created. Once we no longer
-// copy the parent meta data, but store only the local diffs (PHOENIX-3534), we will no longer need
-// to do this.
-SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true, true);
+SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true);
 return visitor;
 } catch (TableNotFoundException e) {
 // Used for mapped VIEW, since we won't be able to resolve that.
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index bd3f2a9..d5a08bc 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -223,6 +223,7 @@ import org.apache.phoenix.schema.PSynchronizedMetaData;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableRef;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.ReadOnlyTableException;
 import org.apache.phoenix.schema.SaltingUtil;
@@ -266,6 +267,7 @@ import org.apache.phoenix.util

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5403 Optimize metadata cache lookup of global tables using a tenant specific connection

2019-07-31 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new ded4fa1  PHOENIX-5403 Optimize metadata cache lookup of global tables 
using a tenant specific connection
ded4fa1 is described below

commit ded4fa175f23c0c481c39edcd100b334f3661b7c
Author: Thomas D'Silva 
AuthorDate: Mon Jul 22 22:45:04 2019 -0700

PHOENIX-5403 Optimize metadata cache lookup of global tables using a tenant 
specific connection
---
 .../java/org/apache/phoenix/end2end/Array2IT.java  | 17 ++---
 .../org/apache/phoenix/compile/FromCompiler.java   |  6 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java |  7 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  | 74 --
 4 files changed, 39 insertions(+), 65 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
index 52bfb86..0cb60c2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
@@ -37,6 +37,7 @@ import org.apache.phoenix.schema.types.PhoenixArray;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
+import org.junit.Ignore;
 import org.junit.Test;
 
 public class Array2IT extends ArrayIT {
@@ -669,27 +670,19 @@ public class Array2IT extends ArrayIT {
 
 }
 
-@Test
+@Test // see PHOENIX-5416
+@Ignore
 public void testArrayRefToLiteral() throws Exception {
-Connection conn;
-
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement stmt = conn.prepareStatement("select ?[2] from \"SYSTEM\".\"catalog\" limit 1");
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+PreparedStatement stmt = conn.prepareStatement("select ?[2] from \"SYSTEM\".\"CATALOG\" limit 1");
 Array array = conn.createArrayOf("CHAR", new String[] {"a","b","c"});
 stmt.setArray(1, array);
 ResultSet rs = stmt.executeQuery();
 assertTrue(rs.next());
 assertEquals("b", rs.getString(1));
 assertFalse(rs.next());
-} catch (SQLException e) {
-} finally {
-if (conn != null) {
-conn.close();
-}
 }
-
 }
 
 @Test
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 97ac0f6..0bdc748 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -174,11 +174,7 @@ public class FromCompiler {
 NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.emptyList());
 // Always use non-tenant-specific connection here
 try {
-// We need to always get the latest meta data for the parent table of a create view call to ensure that
-// that we're copying the current table meta data as of when the view is created. Once we no longer
-// copy the parent meta data, but store only the local diffs (PHOENIX-3534), we will no longer need
-// to do this.
-SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true, true);
+SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true);
 return visitor;
 } catch (TableNotFoundException e) {
 // Used for mapped VIEW, since we won't be able to resolve that.
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index bd3f2a9..d5a08bc 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -223,6 +223,7 @@ import org.apache.phoenix.schema.PSynchronizedMetaData;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableRef;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.ReadOnlyTableException;
 import org.apache.phoenix.schema.SaltingUtil;
@@ -266,6 +267,7 @@ import org.apache.phoenix.util

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5403 Optimize metadata cache lookup of global tables using a tenant specific connection

2019-07-31 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 3c209d2  PHOENIX-5403 Optimize metadata cache lookup of global tables 
using a tenant specific connection
3c209d2 is described below

commit 3c209d2f53738aa7458bfffae7110a204cac064e
Author: Thomas D'Silva 
AuthorDate: Mon Jul 22 22:45:04 2019 -0700

PHOENIX-5403 Optimize metadata cache lookup of global tables using a tenant 
specific connection
---
 .../java/org/apache/phoenix/end2end/Array2IT.java  | 17 ++---
 .../org/apache/phoenix/compile/FromCompiler.java   |  6 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java |  7 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  | 74 --
 4 files changed, 39 insertions(+), 65 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
index 52bfb86..0cb60c2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/Array2IT.java
@@ -37,6 +37,7 @@ import org.apache.phoenix.schema.types.PhoenixArray;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
+import org.junit.Ignore;
 import org.junit.Test;
 
 public class Array2IT extends ArrayIT {
@@ -669,27 +670,19 @@ public class Array2IT extends ArrayIT {
 
 }
 
-@Test
+@Test // see PHOENIX-5416
+@Ignore
 public void testArrayRefToLiteral() throws Exception {
-Connection conn;
-
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-conn = DriverManager.getConnection(getUrl(), props);
-try {
-PreparedStatement stmt = conn.prepareStatement("select ?[2] from \"SYSTEM\".\"catalog\" limit 1");
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+PreparedStatement stmt = conn.prepareStatement("select ?[2] from \"SYSTEM\".\"CATALOG\" limit 1");
 Array array = conn.createArrayOf("CHAR", new String[] {"a","b","c"});
 stmt.setArray(1, array);
 ResultSet rs = stmt.executeQuery();
 assertTrue(rs.next());
 assertEquals("b", rs.getString(1));
 assertFalse(rs.next());
-} catch (SQLException e) {
-} finally {
-if (conn != null) {
-conn.close();
-}
 }
-
 }
 
 @Test
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 97ac0f6..0bdc748 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -174,11 +174,7 @@ public class FromCompiler {
 NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.emptyList());
 // Always use non-tenant-specific connection here
 try {
-// We need to always get the latest meta data for the parent table of a create view call to ensure that
-// that we're copying the current table meta data as of when the view is created. Once we no longer
-// copy the parent meta data, but store only the local diffs (PHOENIX-3534), we will no longer need
-// to do this.
-SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true, true);
+SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true);
 return visitor;
 } catch (TableNotFoundException e) {
 // Used for mapped VIEW, since we won't be able to resolve that.
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index bd3f2a9..d5a08bc 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -223,6 +223,7 @@ import org.apache.phoenix.schema.PSynchronizedMetaData;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableRef;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.ReadOnlyTableException;
 import org.apache.phoenix.schema.SaltingUtil;
@@ -266,6 +267,7 @@ import org.apache.phoenix.util

[phoenix] branch master updated: PHOENIX-5104: breaks client backwards compatibility

2019-07-29 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new b09d544  PHOENIX-5104: breaks client backwards compatibility
b09d544 is described below

commit b09d54417b21cd8a5d996c79b6dbb1480d4699e5
Author: Mehdi Salarkia 
AuthorDate: Sun Jul 14 22:04:14 2019 -0700

PHOENIX-5104: breaks client backwards compatibility

Fixing by adding a new connection property "phoenix.index.longViewIndex.enabled", which is false by default. Clients can set this property to true to be able to use long view indexes.
---
 phoenix-core/src/it/resources/hbase-site.xml  |  9 +
 .../main/java/org/apache/phoenix/query/QueryServices.java |  2 ++
 .../org/apache/phoenix/query/QueryServicesOptions.java|  1 +
 .../java/org/apache/phoenix/schema/MetaDataClient.java| 15 ++-
 4 files changed, 26 insertions(+), 1 deletion(-)
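For reference, a minimal sketch of how a client could opt in through JDBC connection properties (the URL is a placeholder; the property key is the one added below as QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB):

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

// Sketch only: opt a client connection in to long viewIndexId support.
public class LongViewIndexOptIn {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Same key as QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB in the diff below.
        props.setProperty("phoenix.index.longViewIndex.enabled", "true");
        // "jdbc:phoenix:localhost" is a placeholder connection URL.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
            // View index DDL issued on this connection may now use the long viewIndexId type.
        }
    }
}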

diff --git a/phoenix-core/src/it/resources/hbase-site.xml 
b/phoenix-core/src/it/resources/hbase-site.xml
index 326ef70..7ca33a1 100644
--- a/phoenix-core/src/it/resources/hbase-site.xml
+++ b/phoenix-core/src/it/resources/hbase-site.xml
@@ -37,4 +37,13 @@
     <name>hbase.localcluster.assign.random.ports</name>
     <value>true</value>
   </property>
+  <property>
+    <name>phoenix.index.longViewIndex.enabled</name>
+    <value>true</value>
+  </property>
 </configuration>
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index e109293..1302760 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -352,6 +352,8 @@ public interface QueryServices extends SQLCloseable {
 public static final String GLOBAL_INDEX_ROW_REPAIR_COUNT_ATTRIB = "phoenix.global.index.row.repair.count.ms";
 // Enable the IndexRegionObserver Coprocessor
 public static final String INDEX_REGION_OBSERVER_ENABLED_ATTRIB = "phoenix.index.region.observer.enabled";
+// Enable support for long view index (default is false)
+public static final String LONG_VIEW_INDEX_ENABLED_ATTRIB = "phoenix.index.longViewIndex.enabled";


 // Before 4.15 when we created a view we included the parent table column metadata in the view
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 40876d0..fb0eb9b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -394,6 +394,7 @@ public class QueryServicesOptions {
 
 public static final String DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS = "org.apache.phoenix.query.DefaultGuidePostsCacheFactory";
 
+public static final boolean DEFAULT_LONG_VIEW_INDEX_ENABLED = false;
 private final Configuration config;
 
 private QueryServicesOptions(Configuration config) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index f63f815..9c4ee2c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -97,6 +97,7 @@ import static 
org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COU
 import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY;
 import static 
org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
 import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
+import static 
org.apache.phoenix.query.QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_UPDATE_STATS_ASYNC;
 import static org.apache.phoenix.schema.PTable.EncodedCQCounter.NULL_COUNTER;
@@ -1427,6 +1428,18 @@ public class MetaDataClient {
 }
 
 /**
+ * Support long viewIndexId only if the client has explicitly set
+ * the QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB connection property to 'true'.
+ * @return
+ */
+private PDataType getViewIndexDataType() throws SQLException {
+boolean supportsLongViewIndexId = connection.getQueryServices().getProps().getBoolean(
+QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB,
+QueryServicesOptions.DEFAULT_LONG_VIEW_INDEX_ENABLED);
+return supportsLongViewIndexId ? MetaDataUtil.getViewIndexIdDataType() : MetaDataUtil.getLegacyViewIndexIdDataType();
+}
+
+/**
  * Create a

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5104: breaks client backwards compatibility

2019-07-29 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 135202d  PHOENIX-5104: breaks client backwards compatibility
135202d is described below

commit 135202d0cde7746ca1e4798d662f1cdb89b3a76b
Author: Mehdi Salarkia 
AuthorDate: Sun Jul 28 18:40:55 2019 -0700

PHOENIX-5104: breaks client backwards compatibility
---
 phoenix-core/src/it/resources/hbase-site.xml  |  9 +
 .../main/java/org/apache/phoenix/query/QueryServices.java |  2 ++
 .../org/apache/phoenix/query/QueryServicesOptions.java|  1 +
 .../java/org/apache/phoenix/schema/MetaDataClient.java| 15 ++-
 4 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/resources/hbase-site.xml 
b/phoenix-core/src/it/resources/hbase-site.xml
index 691b702..ba45107 100644
--- a/phoenix-core/src/it/resources/hbase-site.xml
+++ b/phoenix-core/src/it/resources/hbase-site.xml
@@ -33,4 +33,13 @@
 version is X.X.X-SNAPSHOT"
 
   
+  <property>
+    <name>phoenix.index.longViewIndex.enabled</name>
+    <value>true</value>
+  </property>
 </configuration>
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index e109293..1302760 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -352,6 +352,8 @@ public interface QueryServices extends SQLCloseable {
 public static final String GLOBAL_INDEX_ROW_REPAIR_COUNT_ATTRIB = "phoenix.global.index.row.repair.count.ms";
 // Enable the IndexRegionObserver Coprocessor
 public static final String INDEX_REGION_OBSERVER_ENABLED_ATTRIB = "phoenix.index.region.observer.enabled";
+// Enable support for long view index (default is false)
+public static final String LONG_VIEW_INDEX_ENABLED_ATTRIB = "phoenix.index.longViewIndex.enabled";


 // Before 4.15 when we created a view we included the parent table column metadata in the view
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index d67cb11..be68134 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -395,6 +395,7 @@ public class QueryServicesOptions {
 
 public static final String DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS = "org.apache.phoenix.query.DefaultGuidePostsCacheFactory";
 
+public static final boolean DEFAULT_LONG_VIEW_INDEX_ENABLED = false;
 private final Configuration config;
 
 private QueryServicesOptions(Configuration config) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index f4c442d..4d56e38 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -97,6 +97,7 @@ import static 
org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COU
 import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY;
 import static 
org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
 import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
+import static 
org.apache.phoenix.query.QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_UPDATE_STATS_ASYNC;
 import static org.apache.phoenix.schema.PTable.EncodedCQCounter.NULL_COUNTER;
@@ -1426,6 +1427,18 @@ public class MetaDataClient {
 }
 
 /**
+ * Support long viewIndexId only if the client has explicitly set
+ * the QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB connection property to 'true'.
+ * @return
+ */
+private PDataType getViewIndexDataType() throws SQLException {
+boolean supportsLongViewIndexId = connection.getQueryServices().getProps().getBoolean(
+QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB,
+QueryServicesOptions.DEFAULT_LONG_VIEW_INDEX_ENABLED);
+return supportsLongViewIndexId ? MetaDataUtil.getViewIndexIdDataType() : MetaDataUtil.getLegacyViewIndexIdDataType();
+}
+
+/**
  * Create an index table by morphing the CreateIndexStatement into a 
CreateTableStatement and calling
  * MetaDataClient.createTable. In doing so, we perform the following 
translations:
  * 1) Change the type of any colu

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5104: breaks client backwards compatibility

2019-07-29 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 8aad8d1  PHOENIX-5104: breaks client backwards compatibility
8aad8d1 is described below

commit 8aad8d15be072bb85f8b19b494ad09463b4bbaf2
Author: Mehdi Salarkia 
AuthorDate: Sun Jul 28 18:40:55 2019 -0700

PHOENIX-5104: breaks client backwards compatibility
---
 phoenix-core/src/it/resources/hbase-site.xml  |  9 +
 .../main/java/org/apache/phoenix/query/QueryServices.java |  2 ++
 .../org/apache/phoenix/query/QueryServicesOptions.java|  1 +
 .../java/org/apache/phoenix/schema/MetaDataClient.java| 15 ++-
 4 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/resources/hbase-site.xml 
b/phoenix-core/src/it/resources/hbase-site.xml
index 691b702..ba45107 100644
--- a/phoenix-core/src/it/resources/hbase-site.xml
+++ b/phoenix-core/src/it/resources/hbase-site.xml
@@ -33,4 +33,13 @@
 version is X.X.X-SNAPSHOT"
 
   
+  <property>
+    <name>phoenix.index.longViewIndex.enabled</name>
+    <value>true</value>
+  </property>
 </configuration>
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 9bdd22f..0a284f3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -352,6 +352,8 @@ public interface QueryServices extends SQLCloseable {
 public static final String GLOBAL_INDEX_ROW_REPAIR_COUNT_ATTRIB = "phoenix.global.index.row.repair.count.ms";
 // Enable the IndexRegionObserver Coprocessor
 public static final String INDEX_REGION_OBSERVER_ENABLED_ATTRIB = "phoenix.index.region.observer.enabled";
+// Enable support for long view index (default is false)
+public static final String LONG_VIEW_INDEX_ENABLED_ATTRIB = "phoenix.index.longViewIndex.enabled";


 // Before 4.15 when we created a view we included the parent table column metadata in the view
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 144d388..0ccd0c6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -393,6 +393,7 @@ public class QueryServicesOptions {
 
 public static final String DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS = "org.apache.phoenix.query.DefaultGuidePostsCacheFactory";
 
+public static final boolean DEFAULT_LONG_VIEW_INDEX_ENABLED = false;
 private final Configuration config;
 
 private QueryServicesOptions(Configuration config) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index b1fffcc..8291edd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -98,6 +98,7 @@ import static 
org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COU
 import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY;
 import static 
org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
 import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
+import static 
org.apache.phoenix.query.QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_UPDATE_STATS_ASYNC;
 import static org.apache.phoenix.schema.PTable.EncodedCQCounter.NULL_COUNTER;
@@ -1440,6 +1441,18 @@ public class MetaDataClient {
 }
 
 /**
+ * Support long viewIndexId only if the client has explicitly set
+ * the QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB connection property to 'true'.
+ * @return
+ */
+private PDataType getViewIndexDataType() throws SQLException {
+boolean supportsLongViewIndexId = connection.getQueryServices().getProps().getBoolean(
+QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB,
+QueryServicesOptions.DEFAULT_LONG_VIEW_INDEX_ENABLED);
+return supportsLongViewIndexId ? MetaDataUtil.getViewIndexIdDataType() : MetaDataUtil.getLegacyViewIndexIdDataType();
+}
+
+/**
  * Create an index table by morphing the CreateIndexStatement into a 
CreateTableStatement and calling
  * MetaDataClient.createTable. In doing so, we perform the following 
translations:
  * 1) Change the type of any colu

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5104: breaks client backwards compatibility

2019-07-29 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 4931f5b  PHOENIX-5104: breaks client backwards compatibility
4931f5b is described below

commit 4931f5bb6644107150b677f36e0644b52b542c0a
Author: Mehdi Salarkia 
AuthorDate: Sun Jul 28 18:40:55 2019 -0700

PHOENIX-5104: breaks client backwards compatibility
---
 phoenix-core/src/it/resources/hbase-site.xml  |  9 +
 .../main/java/org/apache/phoenix/query/QueryServices.java |  2 ++
 .../org/apache/phoenix/query/QueryServicesOptions.java|  1 +
 .../java/org/apache/phoenix/schema/MetaDataClient.java| 15 ++-
 4 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/resources/hbase-site.xml 
b/phoenix-core/src/it/resources/hbase-site.xml
index 691b702..ba45107 100644
--- a/phoenix-core/src/it/resources/hbase-site.xml
+++ b/phoenix-core/src/it/resources/hbase-site.xml
@@ -33,4 +33,13 @@
 version is X.X.X-SNAPSHOT"
 
   
+  <property>
+    <name>phoenix.index.longViewIndex.enabled</name>
+    <value>true</value>
+  </property>
 </configuration>
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index e109293..1302760 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -352,6 +352,8 @@ public interface QueryServices extends SQLCloseable {
 public static final String GLOBAL_INDEX_ROW_REPAIR_COUNT_ATTRIB = "phoenix.global.index.row.repair.count.ms";
 // Enable the IndexRegionObserver Coprocessor
 public static final String INDEX_REGION_OBSERVER_ENABLED_ATTRIB = "phoenix.index.region.observer.enabled";
+// Enable support for long view index (default is false)
+public static final String LONG_VIEW_INDEX_ENABLED_ATTRIB = "phoenix.index.longViewIndex.enabled";


 // Before 4.15 when we created a view we included the parent table column metadata in the view
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index d67cb11..be68134 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -395,6 +395,7 @@ public class QueryServicesOptions {
 
 public static final String DEFAULT_GUIDE_POSTS_CACHE_FACTORY_CLASS = "org.apache.phoenix.query.DefaultGuidePostsCacheFactory";
 
+public static final boolean DEFAULT_LONG_VIEW_INDEX_ENABLED = false;
 private final Configuration config;
 
 private QueryServicesOptions(Configuration config) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index f4c442d..4d56e38 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -97,6 +97,7 @@ import static 
org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COU
 import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY;
 import static 
org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
 import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
+import static 
org.apache.phoenix.query.QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_UPDATE_STATS_ASYNC;
 import static org.apache.phoenix.schema.PTable.EncodedCQCounter.NULL_COUNTER;
@@ -1426,6 +1427,18 @@ public class MetaDataClient {
 }
 
 /**
+ * Support long viewIndexId only if the client has explicitly set
+ * the QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB connection property to 'true'.
+ * @return
+ */
+private PDataType getViewIndexDataType() throws SQLException {
+boolean supportsLongViewIndexId = connection.getQueryServices().getProps().getBoolean(
+QueryServices.LONG_VIEW_INDEX_ENABLED_ATTRIB,
+QueryServicesOptions.DEFAULT_LONG_VIEW_INDEX_ENABLED);
+return supportsLongViewIndexId ? MetaDataUtil.getViewIndexIdDataType() : MetaDataUtil.getLegacyViewIndexIdDataType();
+}
+
+/**
  * Create an index table by morphing the CreateIndexStatement into a 
CreateTableStatement and calling
  * MetaDataClient.createTable. In doing so, we perform the following 
translations:
  * 1) Change the type of any colu

[phoenix] branch master updated: PHOENIX-4893 Move parent column combining logic of view and view indexes from server to client (addendum)

2019-07-22 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 62387ee  PHOENIX-4893 Move parent column combining logic of view and 
view indexes from server to client (addendum)
62387ee is described below

commit 62387ee3c55f8be1947161bc9d501b1867cc24f1
Author: Thomas D'Silva 
AuthorDate: Mon Jul 22 22:41:47 2019 -0700

PHOENIX-4893 Move parent column combining logic of view and view indexes 
from server to client (addendum)
---
 .../org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java   | 10 ++
 1 file changed, 10 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 6457185..9c2e05f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2640,6 +2640,16 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 }
 } finally {
 ServerUtil.releaseRowLocks(locks);
+// drop indexes on views that require the column being dropped
+// these could be on a different region server so don't hold row locks while dropping them
+for (Pair pair : mutator.getTableAndDroppedColumnPairs()) {
+result = dropRemoteIndexes(env, pair.getFirst(), clientTimeStamp, pair.getSecond(),
+tableNamesToDelete, sharedTablesToDelete);
+if (result != null
+&& result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
+return result;
+}
+}
 }
 } catch (Throwable t) {
 ServerUtil.throwIOException(fullTableName, t);



[phoenix] branch master updated: PHOENIX-5275: Remove accidental imports from curator-client-2.12.0

2019-07-17 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new edaf082  PHOENIX-5275: Remove accidental imports from 
curator-client-2.12.0
edaf082 is described below

commit edaf0822ce675c45871c62f7e9d318c3ca691877
Author: William Shen 
AuthorDate: Thu May 16 11:18:35 2019 -0700

PHOENIX-5275: Remove accidental imports from curator-client-2.12.0
---
 phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java | 2 +-
 .../src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java   | 2 +-
 .../src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java  | 3 +--
 3 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index d22bc54..facab6c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -41,7 +41,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.Lists;
-import org.apache.curator.shaded.com.google.common.collect.Sets;
+import com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index f2d5cfe..f3a8fa1 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -50,7 +50,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.curator.shaded.com.google.common.collect.Lists;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index df9d843..8bef7c9 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -20,11 +20,10 @@ package org.apache.phoenix.query;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY;
 import static org.apache.phoenix.query.QueryServicesOptions.withDefaults;
 
-import org.apache.curator.shaded.com.google.common.io.Files;
+import com.google.common.io.Files;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
 import org.apache.tephra.TxConstants;
 
 



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5275: Remove accidental imports from curator-client-2.12.0

2019-07-17 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 1e284e4  PHOENIX-5275: Remove accidental imports from 
curator-client-2.12.0
1e284e4 is described below

commit 1e284e43128a31d26ab44ddffa7efb553aad9246
Author: William Shen 
AuthorDate: Tue Jul 16 10:36:41 2019 -0700

PHOENIX-5275: Remove accidental imports from curator-client-2.12.0
---
 phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java | 2 +-
 .../src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java   | 2 +-
 .../src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java  | 3 +--
 3 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index 3f9948b..98ea0fc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -41,7 +41,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.Lists;
-import org.apache.curator.shaded.com.google.common.collect.Sets;
+import com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.util.Bytes;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index f2d5cfe..f3a8fa1 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -50,7 +50,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.curator.shaded.com.google.common.collect.Lists;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index df9d843..8bef7c9 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -20,11 +20,10 @@ package org.apache.phoenix.query;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY;
 import static org.apache.phoenix.query.QueryServicesOptions.withDefaults;
 
-import org.apache.curator.shaded.com.google.common.io.Files;
+import com.google.common.io.Files;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
 import org.apache.tephra.TxConstants;
 
 



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5275: Remove accidental imports from curator-client-2.12.0

2019-07-17 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 73edce0  PHOENIX-5275: Remove accidental imports from 
curator-client-2.12.0
73edce0 is described below

commit 73edce02916d9e1e8d9ab51c26b0f4f79729e44d
Author: William Shen 
AuthorDate: Tue Jul 16 10:36:41 2019 -0700

PHOENIX-5275: Remove accidental imports from curator-client-2.12.0
---
 phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java | 2 +-
 .../src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java   | 2 +-
 .../src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java  | 3 +--
 3 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index 3f9948b..98ea0fc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -41,7 +41,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.Lists;
-import org.apache.curator.shaded.com.google.common.collect.Sets;
+import com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.util.Bytes;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index f2d5cfe..f3a8fa1 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -50,7 +50,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.curator.shaded.com.google.common.collect.Lists;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index df9d843..8bef7c9 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -20,11 +20,10 @@ package org.apache.phoenix.query;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY;
 import static org.apache.phoenix.query.QueryServicesOptions.withDefaults;
 
-import org.apache.curator.shaded.com.google.common.io.Files;
+import com.google.common.io.Files;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
 import org.apache.tephra.TxConstants;
 
 



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5275: Remove accidental imports from curator-client-2.12.0

2019-07-17 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 3f4a72d  PHOENIX-5275: Remove accidental imports from 
curator-client-2.12.0
3f4a72d is described below

commit 3f4a72ddf05e06b277c986d0bda96308ba7c22d2
Author: William Shen 
AuthorDate: Tue Jul 16 10:36:41 2019 -0700

PHOENIX-5275: Remove accidental imports from curator-client-2.12.0
---
 phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java | 2 +-
 .../src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java   | 2 +-
 .../src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java  | 3 +--
 3 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index 3f9948b..98ea0fc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -41,7 +41,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.collect.Lists;
-import org.apache.curator.shaded.com.google.common.collect.Sets;
+import com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.util.Bytes;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index f2d5cfe..f3a8fa1 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -50,7 +50,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.curator.shaded.com.google.common.collect.Lists;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index df9d843..8bef7c9 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -20,11 +20,10 @@ package org.apache.phoenix.query;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY;
 import static org.apache.phoenix.query.QueryServicesOptions.withDefaults;
 
-import org.apache.curator.shaded.com.google.common.io.Files;
+import com.google.common.io.Files;
 import org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
 import org.apache.tephra.TxConstants;
 
 



[phoenix] branch master updated: PHOENIX-5368 Convert query statements in PhoenixDatabaseMetaData to prepared statements (Rajeshbabu Chintaguntla)

2019-07-12 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 3c5edd6  PHOENIX-5368 Convert query statements in 
PhoenixDatabaseMetaData to prepared statements (Rajeshbabu Chintaguntla)
3c5edd6 is described below

commit 3c5edd6fab97ccc09ec9e93b8c839f8a076ca891
Author: Thomas D'Silva 
AuthorDate: Fri Jul 12 18:13:38 2019 -0700

PHOENIX-5368 Convert query statements in PhoenixDatabaseMetaData to 
prepared statements (Rajeshbabu Chintaguntla)
---
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java  | 100 +++--
 .../phoenix/jdbc/PhoenixPreparedStatement.java |   2 +-
 2 files changed, 72 insertions(+), 30 deletions(-)
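The diff below replaces string-concatenated, manually escaped filter values with JDBC bind parameters collected into a list and bound before execution. A minimal sketch of that pattern, assuming the setParameters helper referenced in the diff simply binds each collected value in order with setString (illustrative, not the committed helper):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

// Sketch only: build SQL with '?' placeholders, collect values in order, bind, execute.
final class ParameterizedMetadataQuery {
    static ResultSet findTenants(Connection connection, String tenantIdPattern) throws SQLException {
        List<String> parameterValues = new ArrayList<String>();
        StringBuilder buf = new StringBuilder(
                "select distinct TENANT_ID from SYSTEM.CATALOG where TENANT_ID like ?");
        parameterValues.add(tenantIdPattern);
        PreparedStatement stmt = connection.prepareStatement(buf.toString());
        // Assumed equivalent of the setParameters(stmt, parameterValues) call in the diff.
        for (int i = 0; i < parameterValues.size(); i++) {
            stmt.setString(i + 1, parameterValues.get(i));
        }
        return stmt.executeQuery();
    }
}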

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index d56eaa4..a70dc92 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.jdbc;
 
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.RowIdLifetime;
 import java.sql.SQLException;
@@ -27,6 +28,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
@@ -473,16 +475,18 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 
 @Override
 public ResultSet getCatalogs() throws SQLException {
+List parameterValues = new ArrayList(4);
 StringBuilder buf = new StringBuilder("select \n" +
 " DISTINCT " + TENANT_ID + " " + TABLE_CAT +
 " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
 " where " + COLUMN_NAME + " is null" +
 " and " + COLUMN_FAMILY + " is null" +
 " and " + TENANT_ID + " is not null");
-addTenantIdFilter(buf, null);
+addTenantIdFilter(buf, null, parameterValues);
 buf.append(" order by " + TENANT_ID);
-Statement stmt = connection.createStatement();
-return stmt.executeQuery(buf.toString());
+PreparedStatement stmt = connection.prepareStatement(buf.toString());
+setParameters(stmt, parameterValues);
+return stmt.executeQuery();
 }
 
 @Override
@@ -498,22 +502,26 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 
 public static final String GLOBAL_TENANANTS_ONLY = "null";
 
-private void addTenantIdFilter(StringBuilder buf, String tenantIdPattern) {
+private void addTenantIdFilter(StringBuilder buf, String tenantIdPattern,
+List parameterValues) {
 PName tenantId = connection.getTenantId();
 if (tenantIdPattern == null) {
 if (tenantId != null) {
 appendConjunction(buf);
 buf.append(" (" + TENANT_ID + " IS NULL " +
-" OR " + TENANT_ID + " = '" + 
StringUtil.escapeStringConstant(tenantId.getString()) + "') ");
+" OR " + TENANT_ID + " = ?) ");
+parameterValues.add(tenantId.getString());
 }
 } else if (tenantIdPattern.length() == 0) {
 appendConjunction(buf);
 buf.append(TENANT_ID + " IS NULL ");
 } else {
 appendConjunction(buf);
-buf.append(" TENANT_ID LIKE '" + 
StringUtil.escapeStringConstant(tenantIdPattern) + "' ");
+buf.append(" TENANT_ID LIKE ? ");
+parameterValues.add(tenantIdPattern);
 if (tenantId != null) {
-buf.append(" and TENANT_ID = '" + 
StringUtil.escapeStringConstant(tenantId.getString()) + "' ");
+buf.append(" and TENANT_ID = ? ");
+parameterValues.add(tenantId.getString());
 }
 }
 }
@@ -1018,6 +1026,7 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 if (unique) { // No unique indexes
 return emptyResultSet;
 }
+List<String> parameterValues = new ArrayList<String>(4);
 StringBuilder buf = new StringBuilder("select \n" +
 TENANT_ID + " " + TABLE_CAT + ",\n" + // use this column for 
column family name
 TABLE_
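
The hunks above collect the tenant-id filter values into parameterValues and hand them to a setParameters helper that is not part of the quoted hunk. A minimal sketch of what such a helper can look like, assuming every collected value is bound as a string (the helper name and call shape come from the diff; the body is an assumption, not the committed implementation):

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

final class ParameterBindingSketch {
    // Bind the collected filter values onto the prepared statement in order.
    static void setParameters(PreparedStatement stmt, List<String> parameterValues)
            throws SQLException {
        for (int i = 0; i < parameterValues.size(); i++) {
            stmt.setString(i + 1, parameterValues.get(i)); // JDBC parameter indexes are 1-based
        }
    }
}

Binding the tenant id and LIKE pattern as parameters avoids splicing them into the SQL text, which is why the StringUtil.escapeStringConstant calls removed in this hunk are no longer needed.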

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5368 Convert query statements in PhoenixDatabaseMetaData to prepared statements (Rajeshbabu Chintaguntla)

2019-07-12 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new dd341b9  PHOENIX-5368 Convert query statements in 
PhoenixDatabaseMetaData to prepared statements (Rajeshbabu Chintaguntla)
dd341b9 is described below

commit dd341b9c5f674e2db6c20a5fb2781c070baaa349
Author: Thomas D'Silva 
AuthorDate: Fri Jul 12 18:13:38 2019 -0700

PHOENIX-5368 Convert query statements in PhoenixDatabaseMetaData to 
prepared statements (Rajeshbabu Chintaguntla)
---
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java  | 100 +++--
 .../phoenix/jdbc/PhoenixPreparedStatement.java |   2 +-
 2 files changed, 72 insertions(+), 30 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 208cf46..56e052d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.jdbc;
 
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.RowIdLifetime;
 import java.sql.SQLException;
@@ -27,6 +28,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
@@ -455,16 +457,18 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 
 @Override
 public ResultSet getCatalogs() throws SQLException {
+List<String> parameterValues = new ArrayList<String>(4);
 StringBuilder buf = new StringBuilder("select \n" +
 " DISTINCT " + TENANT_ID + " " + TABLE_CAT +
 " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
 " where " + COLUMN_NAME + " is null" +
 " and " + COLUMN_FAMILY + " is null" +
 " and " + TENANT_ID + " is not null");
-addTenantIdFilter(buf, null);
+addTenantIdFilter(buf, null, parameterValues);
 buf.append(" order by " + TENANT_ID);
-Statement stmt = connection.createStatement();
-return stmt.executeQuery(buf.toString());
+PreparedStatement stmt = connection.prepareStatement(buf.toString());
+setParameters(stmt, parameterValues);
+return stmt.executeQuery();
 }
 
 @Override
@@ -480,22 +484,26 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 
 public static final String GLOBAL_TENANANTS_ONLY = "null";
 
-private void addTenantIdFilter(StringBuilder buf, String tenantIdPattern) {
+private void addTenantIdFilter(StringBuilder buf, String tenantIdPattern,
+List<String> parameterValues) {
 PName tenantId = connection.getTenantId();
 if (tenantIdPattern == null) {
 if (tenantId != null) {
 appendConjunction(buf);
 buf.append(" (" + TENANT_ID + " IS NULL " +
-" OR " + TENANT_ID + " = '" + 
StringUtil.escapeStringConstant(tenantId.getString()) + "') ");
+" OR " + TENANT_ID + " = ?) ");
+parameterValues.add(tenantId.getString());
 }
 } else if (tenantIdPattern.length() == 0) {
 appendConjunction(buf);
 buf.append(TENANT_ID + " IS NULL ");
 } else {
 appendConjunction(buf);
-buf.append(" TENANT_ID LIKE '" + 
StringUtil.escapeStringConstant(tenantIdPattern) + "' ");
+buf.append(" TENANT_ID LIKE ? ");
+parameterValues.add(tenantIdPattern);
 if (tenantId != null) {
-buf.append(" and TENANT_ID = '" + 
StringUtil.escapeStringConstant(tenantId.getString()) + "' ");
+buf.append(" and TENANT_ID = ? ");
+parameterValues.add(tenantId.getString());
 }
 }
 }
@@ -999,6 +1007,7 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 if (unique) { // No unique indexes
 return emptyResultSet;
 }
+List<String> parameterValues = new ArrayList<String>(4);
 StringBuilder buf = new StringBuilder("select \n" +
 TENANT_ID + " " + TABLE_CAT + ",\n" + // use this column for 
column family name
 TABLE_

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5368 Convert query statements in PhoenixDatabaseMetaData to prepared statements (Rajeshbabu Chintaguntla)

2019-07-12 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new a36efb1  PHOENIX-5368 Convert query statements in 
PhoenixDatabaseMetaData to prepared statements (Rajeshbabu Chintaguntla)
a36efb1 is described below

commit a36efb1b5e9954ca50cc10bd9582c0963857751c
Author: Thomas D'Silva 
AuthorDate: Fri Jul 12 18:13:38 2019 -0700

PHOENIX-5368 Convert query statements in PhoenixDatabaseMetaData to 
prepared statements (Rajeshbabu Chintaguntla)
---
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java  | 100 +++--
 .../phoenix/jdbc/PhoenixPreparedStatement.java |   2 +-
 2 files changed, 72 insertions(+), 30 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 208cf46..56e052d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.jdbc;
 
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.RowIdLifetime;
 import java.sql.SQLException;
@@ -27,6 +28,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
@@ -455,16 +457,18 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 
 @Override
 public ResultSet getCatalogs() throws SQLException {
+List<String> parameterValues = new ArrayList<String>(4);
 StringBuilder buf = new StringBuilder("select \n" +
 " DISTINCT " + TENANT_ID + " " + TABLE_CAT +
 " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
 " where " + COLUMN_NAME + " is null" +
 " and " + COLUMN_FAMILY + " is null" +
 " and " + TENANT_ID + " is not null");
-addTenantIdFilter(buf, null);
+addTenantIdFilter(buf, null, parameterValues);
 buf.append(" order by " + TENANT_ID);
-Statement stmt = connection.createStatement();
-return stmt.executeQuery(buf.toString());
+PreparedStatement stmt = connection.prepareStatement(buf.toString());
+setParameters(stmt, parameterValues);
+return stmt.executeQuery();
 }
 
 @Override
@@ -480,22 +484,26 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 
 public static final String GLOBAL_TENANANTS_ONLY = "null";
 
-private void addTenantIdFilter(StringBuilder buf, String tenantIdPattern) {
+private void addTenantIdFilter(StringBuilder buf, String tenantIdPattern,
+List<String> parameterValues) {
 PName tenantId = connection.getTenantId();
 if (tenantIdPattern == null) {
 if (tenantId != null) {
 appendConjunction(buf);
 buf.append(" (" + TENANT_ID + " IS NULL " +
-" OR " + TENANT_ID + " = '" + 
StringUtil.escapeStringConstant(tenantId.getString()) + "') ");
+" OR " + TENANT_ID + " = ?) ");
+parameterValues.add(tenantId.getString());
 }
 } else if (tenantIdPattern.length() == 0) {
 appendConjunction(buf);
 buf.append(TENANT_ID + " IS NULL ");
 } else {
 appendConjunction(buf);
-buf.append(" TENANT_ID LIKE '" + 
StringUtil.escapeStringConstant(tenantIdPattern) + "' ");
+buf.append(" TENANT_ID LIKE ? ");
+parameterValues.add(tenantIdPattern);
 if (tenantId != null) {
-buf.append(" and TENANT_ID = '" + 
StringUtil.escapeStringConstant(tenantId.getString()) + "' ");
+buf.append(" and TENANT_ID = ? ");
+parameterValues.add(tenantId.getString());
 }
 }
 }
@@ -999,6 +1007,7 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 if (unique) { // No unique indexes
 return emptyResultSet;
 }
+List<String> parameterValues = new ArrayList<String>(4);
 StringBuilder buf = new StringBuilder("select \n" +
 TENANT_ID + " " + TABLE_CAT + ",\n" + // use this column for 
column family name
 TABLE_

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5368 Convert query statements in PhoenixDatabaseMetaData to prepared statements (Rajeshbabu Chintaguntla)

2019-07-12 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new fa870f1  PHOENIX-5368 Convert query statements in 
PhoenixDatabaseMetaData to prepared statements (Rajeshbabu Chintaguntla)
fa870f1 is described below

commit fa870f1d44db0c0d6d94c5eeb5fe9fdb48c65798
Author: Thomas D'Silva 
AuthorDate: Fri Jul 12 18:13:38 2019 -0700

PHOENIX-5368 Convert query statements in PhoenixDatabaseMetaData to 
prepared statements (Rajeshbabu Chintaguntla)
---
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java  | 100 +++--
 .../phoenix/jdbc/PhoenixPreparedStatement.java |   2 +-
 2 files changed, 72 insertions(+), 30 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 208cf46..56e052d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.jdbc;
 
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.RowIdLifetime;
 import java.sql.SQLException;
@@ -27,6 +28,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
@@ -455,16 +457,18 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 
 @Override
 public ResultSet getCatalogs() throws SQLException {
+List<String> parameterValues = new ArrayList<String>(4);
 StringBuilder buf = new StringBuilder("select \n" +
 " DISTINCT " + TENANT_ID + " " + TABLE_CAT +
 " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
 " where " + COLUMN_NAME + " is null" +
 " and " + COLUMN_FAMILY + " is null" +
 " and " + TENANT_ID + " is not null");
-addTenantIdFilter(buf, null);
+addTenantIdFilter(buf, null, parameterValues);
 buf.append(" order by " + TENANT_ID);
-Statement stmt = connection.createStatement();
-return stmt.executeQuery(buf.toString());
+PreparedStatement stmt = connection.prepareStatement(buf.toString());
+setParameters(stmt, parameterValues);
+return stmt.executeQuery();
 }
 
 @Override
@@ -480,22 +484,26 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 
 public static final String GLOBAL_TENANANTS_ONLY = "null";
 
-private void addTenantIdFilter(StringBuilder buf, String tenantIdPattern) {
+private void addTenantIdFilter(StringBuilder buf, String tenantIdPattern,
+List<String> parameterValues) {
 PName tenantId = connection.getTenantId();
 if (tenantIdPattern == null) {
 if (tenantId != null) {
 appendConjunction(buf);
 buf.append(" (" + TENANT_ID + " IS NULL " +
-" OR " + TENANT_ID + " = '" + 
StringUtil.escapeStringConstant(tenantId.getString()) + "') ");
+" OR " + TENANT_ID + " = ?) ");
+parameterValues.add(tenantId.getString());
 }
 } else if (tenantIdPattern.length() == 0) {
 appendConjunction(buf);
 buf.append(TENANT_ID + " IS NULL ");
 } else {
 appendConjunction(buf);
-buf.append(" TENANT_ID LIKE '" + 
StringUtil.escapeStringConstant(tenantIdPattern) + "' ");
+buf.append(" TENANT_ID LIKE ? ");
+parameterValues.add(tenantIdPattern);
 if (tenantId != null) {
-buf.append(" and TENANT_ID = '" + 
StringUtil.escapeStringConstant(tenantId.getString()) + "' ");
+buf.append(" and TENANT_ID = ? ");
+parameterValues.add(tenantId.getString());
 }
 }
 }
@@ -999,6 +1007,7 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 if (unique) { // No unique indexes
 return emptyResultSet;
 }
+List<String> parameterValues = new ArrayList<String>(4);
 StringBuilder buf = new StringBuilder("select \n" +
 TENANT_ID + " " + TABLE_CAT + ",\n" + // use this column for 
column family name
 TABLE_

[phoenix] 02/02: PHOENIX-5269 use AccessChecker to check for user permisssions

2019-06-20 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 138c495f2f6875bfca812ab975a2db4a63385e39
Author: Kiran Kumar Maturi 
AuthorDate: Thu Jun 20 09:03:41 2019 +0530

PHOENIX-5269 use AccessChecker to check for user permisssions
---
 .../apache/phoenix/end2end/PermissionsCacheIT.java | 105 +
 .../coprocessor/PhoenixAccessController.java   |  91 --
 pom.xml|   2 +-
 3 files changed, 190 insertions(+), 8 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
new file mode 100644
index 000..c2f7ce2
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertTrue;
+
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.access.TablePermission;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.ListMultimap;
+
+public class PermissionsCacheIT extends BasePermissionsIT {
+
+
+public PermissionsCacheIT(boolean isNamespaceMapped) throws Exception {
+super(isNamespaceMapped);
+}
+
+@Test
+public void testPermissionsCachedWithAccessChecker() throws Throwable {
+if (!isNamespaceMapped) {
+return;
+}
+startNewMiniCluster();
+final String schema = generateUniqueName();
+final String tableName = generateUniqueName();
+final String phoenixTableName = SchemaUtil.getTableName(schema, 
tableName);
+try (Connection conn = getConnection()) {
+grantPermissions(regularUser1.getShortName(), 
PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES,
+Action.READ, Action.EXEC);
+grantPermissions(regularUser1.getShortName(), 
Collections.singleton("SYSTEM:SEQUENCE"),
+Action.WRITE, Action.READ, Action.EXEC);
+superUser1.runAs(new PrivilegedExceptionAction<Void>() {
+@Override
+public Void run() throws Exception {
+try {
+verifyAllowed(createSchema(schema), superUser1);
+grantPermissions(regularUser1.getShortName(), schema, 
Action.CREATE);
+
grantPermissions(AuthUtil.toGroupEntry(GROUP_SYSTEM_ACCESS), schema,
+Action.CREATE);
+} catch (Throwable e) {
+if (e instanceof Exception) {
+throw (Exception) e;
+} else {
+throw new Exception(e);
+}
+}
+return null;
+}
+});
+verifyAllowed(createTable(phoenixTableName), regularUser1);
+HBaseTestingUtility utility = getUtility();
+Configuration conf = utility.getConfiguration();
+ZooKeeperWatcher zkw = 
HBaseTestingUtility.getZooKeeperWatcher(utility);
+String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", 
"acl");
+String aclZNode = ZKUtil.joinZNode(zkw.baseZNode, aclZnodeParent);
+String tableZNode = ZKUtil.joinZNode(aclZNode, "@" + schema);
+byte[] data = ZKUtil.getData
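
The quoted test breaks off just after it reads the schema's ACL znode. The imports in the hunk (AccessControlLists, TablePermission, ListMultimap) suggest the remainder deserializes that payload and asserts on the cached permissions; a sketch of that pattern against the HBase 1.x API, reusing the test's local variables (zkw, tableZNode, conf, regularUser1) and assuming the exact assertion, which is not shown in this email:

byte[] data = ZKUtil.getData(zkw, tableZNode);
// Deserialize the znode payload into per-user table permissions.
ListMultimap<String, TablePermission> perms =
        AccessControlLists.readPermissions(data, conf);
List<TablePermission> userPerms = perms.get(regularUser1.getShortName());
// Assumed assertion: the ACL cache holds entries for the granted user.
assertTrue("ACL znode should cache permissions for " + regularUser1.getShortName(),
        userPerms != null && !userPerms.isEmpty());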

[phoenix] branch 4.14-HBase-1.3 updated (18007c8 -> 138c495)

2019-06-20 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 18007c8  PHOENIX-5343 OrphanViewTool should not check Index Tables
 new 7095dd5  PHOENIX-5303 Fix index failures with some versions of HBase.
 new 138c495  PHOENIX-5269 use AccessChecker to check for user permisssions

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../apache/phoenix/end2end/PermissionsCacheIT.java | 105 +
 .../coprocessor/PhoenixAccessController.java   |  91 --
 .../hbase/index/scanner/ScannerBuilder.java|   9 +-
 pom.xml|   2 +-
 4 files changed, 197 insertions(+), 10 deletions(-)
 create mode 100644 
phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java



[phoenix] 01/02: PHOENIX-5303 Fix index failures with some versions of HBase.

2019-06-20 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 7095dd575da2aad38c2d8bd173d83dd4b6994f61
Author: Lars Hofhansl 
AuthorDate: Tue May 28 10:49:43 2019 -0700

PHOENIX-5303 Fix index failures with some versions of HBase.
---
 .../org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java   | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
index 703fcd2..318517c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
@@ -24,6 +24,7 @@ import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.filter.FamilyFilter;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
 import org.apache.hadoop.hbase.filter.QualifierFilter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.covered.KeyValueStore;
@@ -92,10 +94,13 @@ public class ScannerBuilder {
   Filter columnFilter =
   new FamilyFilter(CompareOp.EQUAL, new 
BinaryComparator(ref.getFamily()));
   // combine with a match for the qualifier, if the qualifier is a 
specific qualifier
+  // in that case we *must* let empty qualifiers through for family delete 
markers
   if (!Bytes.equals(ColumnReference.ALL_QUALIFIERS, ref.getQualifier())) {
 columnFilter =
-new FilterList(columnFilter, new QualifierFilter(CompareOp.EQUAL, 
new BinaryComparator(
-ref.getQualifier())));
+new FilterList(columnFilter,
+new FilterList(Operator.MUST_PASS_ONE,
+new QualifierFilter(CompareOp.EQUAL, new 
BinaryComparator(ref.getQualifier())),
+new QualifierFilter(CompareOp.EQUAL, new 
BinaryComparator(HConstants.EMPTY_BYTE_ARRAY))));
   }
   columnFilters.addFilter(columnFilter);
 }
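
The fix widens the qualifier match so that family delete markers, which carry an empty qualifier, still pass the filter built for a specific indexed column. A standalone illustration of the filter shape the patch produces, against the HBase 1.x filter API (class and variable names here are placeholders, not Phoenix's):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FilterList.Operator;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.util.Bytes;

final class ColumnFilterShape {
    // Family must match, and the qualifier must be either the referenced
    // qualifier or empty (the empty case lets family delete markers through).
    static Filter buildColumnFilter(byte[] family, byte[] qualifier) {
        Filter familyMatches =
                new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(family));
        Filter qualifierMatchesOrEmpty =
                new FilterList(Operator.MUST_PASS_ONE,
                        new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(qualifier)),
                        new QualifierFilter(CompareOp.EQUAL,
                                new BinaryComparator(HConstants.EMPTY_BYTE_ARRAY)));
        // The outer FilterList defaults to MUST_PASS_ALL: both conditions must hold.
        return new FilterList(familyMatches, qualifierMatchesOrEmpty);
    }

    public static void main(String[] args) {
        System.out.println(buildColumnFilter(Bytes.toBytes("0"), Bytes.toBytes("COL1")));
    }
}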



[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-5303 Fix index failures with some versions of HBase.

2019-06-20 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 0f07bdf  PHOENIX-5303 Fix index failures with some versions of HBase.
0f07bdf is described below

commit 0f07bdf4db6da166249ce909216881500dd11080
Author: Lars Hofhansl 
AuthorDate: Tue May 28 10:49:43 2019 -0700

PHOENIX-5303 Fix index failures with some versions of HBase.
---
 .../org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java   | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
index 703fcd2..318517c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
@@ -24,6 +24,7 @@ import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.filter.FamilyFilter;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
 import org.apache.hadoop.hbase.filter.QualifierFilter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.covered.KeyValueStore;
@@ -92,10 +94,13 @@ public class ScannerBuilder {
   Filter columnFilter =
   new FamilyFilter(CompareOp.EQUAL, new 
BinaryComparator(ref.getFamily()));
   // combine with a match for the qualifier, if the qualifier is a 
specific qualifier
+  // in that case we *must* let empty qualifiers through for family delete 
markers
   if (!Bytes.equals(ColumnReference.ALL_QUALIFIERS, ref.getQualifier())) {
 columnFilter =
-new FilterList(columnFilter, new QualifierFilter(CompareOp.EQUAL, 
new BinaryComparator(
-ref.getQualifier())));
+new FilterList(columnFilter,
+new FilterList(Operator.MUST_PASS_ONE,
+new QualifierFilter(CompareOp.EQUAL, new 
BinaryComparator(ref.getQualifier())),
+new QualifierFilter(CompareOp.EQUAL, new 
BinaryComparator(HConstants.EMPTY_BYTE_ARRAY))));
   }
   columnFilters.addFilter(columnFilter);
 }



[phoenix] branch master updated: PHOENIX-5269 use AccessChecker to check for user permisssions

2019-06-18 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 1f2508d  PHOENIX-5269 use AccessChecker to check for user permisssions
1f2508d is described below

commit 1f2508dbde365aaedac628c89df237e8b6b46df8
Author: Kiran Kumar Maturi 
AuthorDate: Mon Jun 17 16:42:49 2019 +0530

PHOENIX-5269 use AccessChecker to check for user permisssions
---
 .../apache/phoenix/end2end/PermissionsCacheIT.java | 107 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   2 +
 .../coprocessor/PhoenixAccessController.java   |  77 +--
 .../PhoenixMetaDataCoprocessorHost.java|   5 +
 4 files changed, 185 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
new file mode 100644
index 000..8d0c694
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertTrue;
+
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.access.TablePermission;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
+import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class PermissionsCacheIT extends BasePermissionsIT {
+
+public PermissionsCacheIT() throws Exception {
+   super(true);
+   }
+
+@BeforeClass
+public static void doSetup() throws Exception {
+BasePermissionsIT.initCluster(true);
+}
+
+@Test
+public void testPermissionsCachedWithAccessChecker() throws Throwable {
+if (!isNamespaceMapped) {
+return;
+}
+final String schema = generateUniqueName();
+final String tableName = generateUniqueName();
+final String phoenixTableName = SchemaUtil.getTableName(schema, 
tableName);
+try (Connection conn = getConnection()) {
+grantPermissions(regularUser1.getShortName(), 
PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES,
+Action.READ, Action.EXEC);
+grantPermissions(regularUser1.getShortName(), 
Collections.singleton("SYSTEM:SEQUENCE"),
+Action.WRITE, Action.READ, Action.EXEC);
+superUser1.runAs(new PrivilegedExceptionAction<Void>() {
+@Override
+public Void run() throws Exception {
+try {
+verifyAllowed(createSchema(schema), superUser1);
+grantPermissions(regularUser1.getShortName(), schema, 
Action.CREATE);
+
grantPermissions(AuthUtil.toGroupEntry(GROUP_SYSTEM_ACCESS), schema,
+Action.CREATE);
+} catch (Throwable e) {
+if (e instanceof Exception) {
+throw (Exception) e;
+} else {
+throw new Exception(e);
+}
+}
+return null;
+}
+});
+verifyAllowed(createTable(phoenixTableName), regularUser1);
+HBaseTestingUtility utility = getUtility();
+Configuration conf = utility.getConfiguration

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5269 use AccessChecker to check for user permisssions

2019-06-18 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new eb8ac33  PHOENIX-5269 use AccessChecker to check for user permisssions
eb8ac33 is described below

commit eb8ac33029cd1ce781bf2f8b826502f642f735c5
Author: Kiran Kumar Maturi 
AuthorDate: Tue Jun 18 14:50:44 2019 +0530

PHOENIX-5269 use AccessChecker to check for user permisssions
---
 .../apache/phoenix/end2end/PermissionsCacheIT.java | 108 +
 .../coprocessor/PhoenixAccessController.java   |  91 +++--
 pom.xml|   2 +-
 3 files changed, 193 insertions(+), 8 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
new file mode 100644
index 000..030c03f
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertTrue;
+
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.access.TablePermission;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.ListMultimap;
+
+public class PermissionsCacheIT extends BasePermissionsIT {
+
+public PermissionsCacheIT() throws Exception {
+super(true);
+}
+
+@BeforeClass
+public static void doSetup() throws Exception {
+BasePermissionsIT.initCluster(true);
+}
+
+@Test
+public void testPermissionsCachedWithAccessChecker() throws Throwable {
+if (!isNamespaceMapped) {
+return;
+}
+final String schema = generateUniqueName();
+final String tableName = generateUniqueName();
+final String phoenixTableName = SchemaUtil.getTableName(schema, 
tableName);
+try (Connection conn = getConnection()) {
+grantPermissions(regularUser1.getShortName(), 
PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES,
+Action.READ, Action.EXEC);
+grantPermissions(regularUser1.getShortName(), 
Collections.singleton("SYSTEM:SEQUENCE"),
+Action.WRITE, Action.READ, Action.EXEC);
+superUser1.runAs(new PrivilegedExceptionAction<Void>() {
+@Override
+public Void run() throws Exception {
+try {
+verifyAllowed(createSchema(schema), superUser1);
+grantPermissions(regularUser1.getShortName(), schema, 
Action.CREATE);
+
grantPermissions(AuthUtil.toGroupEntry(GROUP_SYSTEM_ACCESS), schema,
+Action.CREATE);
+} catch (Throwable e) {
+if (e instanceof Exception) {
+throw (Exception) e;
+} else {
+throw new Exception(e);
+}
+}
+return null;
+}
+});
+verifyAllowed(createTable(phoenixTableName), regularUser1);
+HBaseTestingUtility utility = getUtility();
+Configuration conf = utility.getConfiguration();
+ZooKeeperWatcher zkw = 
HBaseTestingUtility.getZooKeeperWatcher(utility);
+String aclZnodeParent = conf.get("zookeeper.znode.acl.pa

[phoenix] branch master updated: PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new b8f465c  PHOENIX-5122: PHOENIX-4322 breaks client backward 
compatibility (addendum)
b8f465c is described below

commit b8f465cd3649b66f19e307e5e00dea20ac0a8667
Author: Jacob Isaac 
AuthorDate: Thu Jun 6 13:05:13 2019 -0700

PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)
---
 .../apache/phoenix/expression/RowValueConstructorExpression.java  | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
index c06bdc8..f92d1e2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
@@ -34,6 +34,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.visitor.ExpressionVisitor;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
@@ -236,12 +237,15 @@ public class RowValueConstructorExpression extends 
BaseCompoundExpression {
 int outputSize = output.size();
 byte[] outputBytes = output.getBuffer();
 // Don't remove trailing separator byte unless it's the 
one for ASC
-// as otherwise we need it to ensure sort order is correct
+// as otherwise we need it to ensure sort order is correct.
+// Additionally for b/w compat with clients older than 
4.14.1 -
+// If SortOorder.ASC then always strip trailing separator 
byte (as before)
+// else only strip for >= 4.14 client (when 
STRIP_TRAILING_SEPARATOR_BYTE bit is set)
 for (int k = expressionCount -1 ; 
 k >=0 &&  getChildren().get(k).getDataType() != 
null 
   && 
!getChildren().get(k).getDataType().isFixedWidth()
   && outputBytes[outputSize-1] == 
SchemaUtil.getSeparatorByte(true, false, getChildren().get(k))
-  && isStripTrailingSepByte() ; k--) {
+  &&  (getChildren().get(k).getSortOrder() == 
SortOrder.ASC ? true : isStripTrailingSepByte()) ; k--) {
 outputSize--;
 }
 ptr.set(outputBytes, 0, outputSize);
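
The change makes the strip decision depend on the child's sort order: ASC children keep the pre-4.14.1 behaviour of always stripping the trailing separator byte, while DESC children are stripped only when the client set the STRIP_TRAILING_SEPARATOR_BYTE bit. The new loop guard reduces to this predicate (a paraphrase for illustration, not Phoenix code; SortOrder is org.apache.phoenix.schema.SortOrder):

import org.apache.phoenix.schema.SortOrder;

final class SeparatorStripRule {
    // Equivalent of: sortOrder == SortOrder.ASC ? true : isStripTrailingSepByte()
    static boolean mayStripTrailingSeparator(SortOrder sortOrder, boolean stripFlagSet) {
        return sortOrder == SortOrder.ASC || stripFlagSet;
    }
}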



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new baea100  PHOENIX-5122: PHOENIX-4322 breaks client backward 
compatibility (addendum)
baea100 is described below

commit baea100944410d25a021402475b48ea0c8478bc0
Author: Jacob Isaac 
AuthorDate: Thu Jun 6 13:05:13 2019 -0700

PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)
---
 .../apache/phoenix/expression/RowValueConstructorExpression.java  | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
index c06bdc8..f92d1e2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
@@ -34,6 +34,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.visitor.ExpressionVisitor;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
@@ -236,12 +237,15 @@ public class RowValueConstructorExpression extends 
BaseCompoundExpression {
 int outputSize = output.size();
 byte[] outputBytes = output.getBuffer();
 // Don't remove trailing separator byte unless it's the 
one for ASC
-// as otherwise we need it to ensure sort order is correct
+// as otherwise we need it to ensure sort order is correct.
+// Additionally for b/w compat with clients older than 
4.14.1 -
+// If SortOorder.ASC then always strip trailing separator 
byte (as before)
+// else only strip for >= 4.14 client (when 
STRIP_TRAILING_SEPARATOR_BYTE bit is set)
 for (int k = expressionCount -1 ; 
 k >=0 &&  getChildren().get(k).getDataType() != 
null 
   && 
!getChildren().get(k).getDataType().isFixedWidth()
   && outputBytes[outputSize-1] == 
SchemaUtil.getSeparatorByte(true, false, getChildren().get(k))
-  && isStripTrailingSepByte() ; k--) {
+  &&  (getChildren().get(k).getSortOrder() == 
SortOrder.ASC ? true : isStripTrailingSepByte()) ; k--) {
 outputSize--;
 }
 ptr.set(outputBytes, 0, outputSize);



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new e03e61e  PHOENIX-5122: PHOENIX-4322 breaks client backward 
compatibility (addendum)
e03e61e is described below

commit e03e61e6e6818bbb12bcdcc867067a7f73c31647
Author: Jacob Isaac 
AuthorDate: Thu Jun 6 13:05:13 2019 -0700

PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)
---
 .../apache/phoenix/expression/RowValueConstructorExpression.java  | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
index c06bdc8..f92d1e2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
@@ -34,6 +34,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.visitor.ExpressionVisitor;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
@@ -236,12 +237,15 @@ public class RowValueConstructorExpression extends 
BaseCompoundExpression {
 int outputSize = output.size();
 byte[] outputBytes = output.getBuffer();
 // Don't remove trailing separator byte unless it's the 
one for ASC
-// as otherwise we need it to ensure sort order is correct
+// as otherwise we need it to ensure sort order is correct.
+// Additionally for b/w compat with clients older than 
4.14.1 -
+// If SortOorder.ASC then always strip trailing separator 
byte (as before)
+// else only strip for >= 4.14 client (when 
STRIP_TRAILING_SEPARATOR_BYTE bit is set)
 for (int k = expressionCount -1 ; 
 k >=0 &&  getChildren().get(k).getDataType() != 
null 
   && 
!getChildren().get(k).getDataType().isFixedWidth()
   && outputBytes[outputSize-1] == 
SchemaUtil.getSeparatorByte(true, false, getChildren().get(k))
-  && isStripTrailingSepByte() ; k--) {
+  &&  (getChildren().get(k).getSortOrder() == 
SortOrder.ASC ? true : isStripTrailingSepByte()) ; k--) {
 outputSize--;
 }
 ptr.set(outputBytes, 0, outputSize);



[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new a521090  PHOENIX-5122: PHOENIX-4322 breaks client backward 
compatibility (addendum)
a521090 is described below

commit a52109006fa636a1fe12e7c821e51d728e47f436
Author: Jacob Isaac 
AuthorDate: Thu Jun 6 13:05:13 2019 -0700

PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)
---
 .../apache/phoenix/expression/RowValueConstructorExpression.java  | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
index c06bdc8..f92d1e2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
@@ -34,6 +34,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.visitor.ExpressionVisitor;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
@@ -236,12 +237,15 @@ public class RowValueConstructorExpression extends 
BaseCompoundExpression {
 int outputSize = output.size();
 byte[] outputBytes = output.getBuffer();
 // Don't remove trailing separator byte unless it's the 
one for ASC
-// as otherwise we need it to ensure sort order is correct
+// as otherwise we need it to ensure sort order is correct.
+// Additionally for b/w compat with clients older than 
4.14.1 -
+// If SortOorder.ASC then always strip trailing separator 
byte (as before)
+// else only strip for >= 4.14 client (when 
STRIP_TRAILING_SEPARATOR_BYTE bit is set)
 for (int k = expressionCount -1 ; 
 k >=0 &&  getChildren().get(k).getDataType() != 
null 
   && 
!getChildren().get(k).getDataType().isFixedWidth()
   && outputBytes[outputSize-1] == 
SchemaUtil.getSeparatorByte(true, false, getChildren().get(k))
-  && isStripTrailingSepByte() ; k--) {
+  &&  (getChildren().get(k).getSortOrder() == 
SortOrder.ASC ? true : isStripTrailingSepByte()) ; k--) {
 outputSize--;
 }
 ptr.set(outputBytes, 0, outputSize);



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 1d12f26  PHOENIX-5122: PHOENIX-4322 breaks client backward 
compatibility (addendum)
1d12f26 is described below

commit 1d12f269cb36c5090c4689597e313d03567cc6bc
Author: Jacob Isaac 
AuthorDate: Thu Jun 6 13:05:13 2019 -0700

PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)
---
 .../apache/phoenix/expression/RowValueConstructorExpression.java  | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
index c06bdc8..f92d1e2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
@@ -34,6 +34,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.visitor.ExpressionVisitor;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
@@ -236,12 +237,15 @@ public class RowValueConstructorExpression extends 
BaseCompoundExpression {
 int outputSize = output.size();
 byte[] outputBytes = output.getBuffer();
 // Don't remove trailing separator byte unless it's the 
one for ASC
-// as otherwise we need it to ensure sort order is correct
+// as otherwise we need it to ensure sort order is correct.
+// Additionally for b/w compat with clients older than 
4.14.1 -
+// If SortOorder.ASC then always strip trailing separator 
byte (as before)
+// else only strip for >= 4.14 client (when 
STRIP_TRAILING_SEPARATOR_BYTE bit is set)
 for (int k = expressionCount -1 ; 
 k >=0 &&  getChildren().get(k).getDataType() != 
null 
   && 
!getChildren().get(k).getDataType().isFixedWidth()
   && outputBytes[outputSize-1] == 
SchemaUtil.getSeparatorByte(true, false, getChildren().get(k))
-  && isStripTrailingSepByte() ; k--) {
+  &&  (getChildren().get(k).getSortOrder() == 
SortOrder.ASC ? true : isStripTrailingSepByte()) ; k--) {
 outputSize--;
 }
 ptr.set(outputBytes, 0, outputSize);



[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new 3e790bd  PHOENIX-5122: PHOENIX-4322 breaks client backward 
compatibility (addendum)
3e790bd is described below

commit 3e790bdd3ca2eb7ec816715c03642d8c0e2479fb
Author: Jacob Isaac 
AuthorDate: Thu Jun 6 13:05:13 2019 -0700

PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility (addendum)
---
 .../apache/phoenix/expression/RowValueConstructorExpression.java  | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
index c06bdc8..f92d1e2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
@@ -34,6 +34,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.visitor.ExpressionVisitor;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
@@ -236,12 +237,15 @@ public class RowValueConstructorExpression extends 
BaseCompoundExpression {
 int outputSize = output.size();
 byte[] outputBytes = output.getBuffer();
 // Don't remove trailing separator byte unless it's the 
one for ASC
-// as otherwise we need it to ensure sort order is correct
+// as otherwise we need it to ensure sort order is correct.
+// Additionally for b/w compat with clients older than 
4.14.1 -
+// If SortOorder.ASC then always strip trailing separator 
byte (as before)
+// else only strip for >= 4.14 client (when 
STRIP_TRAILING_SEPARATOR_BYTE bit is set)
 for (int k = expressionCount -1 ; 
 k >=0 &&  getChildren().get(k).getDataType() != 
null 
   && 
!getChildren().get(k).getDataType().isFixedWidth()
   && outputBytes[outputSize-1] == 
SchemaUtil.getSeparatorByte(true, false, getChildren().get(k))
-  && isStripTrailingSepByte() ; k--) {
+  &&  (getChildren().get(k).getSortOrder() == 
SortOrder.ASC ? true : isStripTrailingSepByte()) ; k--) {
 outputSize--;
 }
 ptr.set(outputBytes, 0, outputSize);



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary keys that are prefixes of each other

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 34e4a76  PHOENIX-5318 Slots passed to SkipScan filter is incorrect for 
desc primary keys that are prefixes of each other
34e4a76 is described below

commit 34e4a7604852960f37a730f35cb8971a8ad4e7f5
Author: Thomas D'Silva 
AuthorDate: Thu Jun 6 15:19:03 2019 -0700

PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary 
keys that are prefixes of each other
---
 .../apache/phoenix/end2end/SkipScanQueryIT.java| 71 ++
 .../org/apache/phoenix/compile/ScanRanges.java |  6 +-
 .../java/org/apache/phoenix/query/KeyRange.java| 29 +
 3 files changed, 103 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index fb0b568..f66f196 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -29,13 +29,20 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.filter.SkipScanFilter;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.util.TestUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -584,4 +591,68 @@ public class SkipScanQueryIT extends 
ParallelStatsDisabledIT {
 assertTrue(rs.next());
 }
 }
+
+@Test
+public void testOrWithMixedOrderPKs() throws Exception {
+String tableName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+conn.setAutoCommit(true);
+Statement stmt = conn.createStatement();
+
+stmt.execute("CREATE TABLE " + tableName +
+" (COL1 VARCHAR, COL2 VARCHAR CONSTRAINT PK PRIMARY KEY 
(COL1 DESC, COL2)) ");
+
+// this is the order the rows will be stored on disk
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES 
('8', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES 
('6', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES 
('23', 'b')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES 
('23', 'bb')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES 
('2', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES 
('17', 'a')");
+
+
+// test values in the skip scan filter which are prefixes of 
another value, eg 1,12 and 2,23
+String sql = "select COL1, COL2 from " + tableName + " where 
COL1='1' OR COL1='2' OR COL1='3' OR COL1='4' " +
+"OR COL1='5' OR COL1='6' OR COL1='8' OR COL1='17' OR 
COL1='12' OR COL1='23'";
+
+ResultSet rs = stmt.executeQuery(sql);
+assertTrue(rs.next());
+
+QueryPlan plan = 
stmt.unwrap(PhoenixStatement.class).getQueryPlan();
+assertEquals("Expected a single scan ", 1, plan.getScans().size());
+assertEquals("Expected a single scan ", 1, 
plan.getScans().get(0).size());
+Scan scan = plan.getScans().get(0).get(0);
+FilterList filterList = (FilterList)scan.getFilter();
+boolean skipScanFilterFound = false;
+for (Filter f : filterList.getFilters()) {
+if (f instanceof SkipScanFilter) {
+skipScanFilterFound = true;
+SkipScanFilter skipScanFilter = (SkipScanFilter) f;
+assertEquals("Expected a single slot ", 
skipScanFilter.getSlots().size(), 1);
+assertEquals("Number of key ranges should match number of 
or filters ",
+skipScanFilter.getSlots().get(0).size(), 10);
+}
+}
+assertTrue("Should use skip scan filter", skipScanFilterFound);
+
+   
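
The test above asserts, by unwrapping PhoenixStatement and walking the compiled QueryPlan, that all ten OR values land in a single SkipScanFilter slot even though some values ('2' and '23', '1' and '12') are prefixes of each other under the DESC key encoding. A lighter-weight check from plain JDBC is to read the EXPLAIN output; a sketch that reuses the test's stmt and sql variables and assumes the plan text contains "SKIP SCAN" when the optimization applies (that literal is an assumption, not part of this commit):

// Collect the EXPLAIN plan text and assert the query is served by a skip scan.
StringBuilder planText = new StringBuilder();
try (ResultSet explainRs = stmt.executeQuery("EXPLAIN " + sql)) {
    while (explainRs.next()) {
        planText.append(explainRs.getString(1)).append('\n');
    }
}
assertTrue("Expected a skip scan", planText.toString().contains("SKIP SCAN"));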

[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary keys that are prefixes of each other

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new 3c4f030  PHOENIX-5318 Slots passed to SkipScan filter is incorrect for 
desc primary keys that are prefixes of each other
3c4f030 is described below

commit 3c4f0301a749b3c1e6ef4d3fa39e3e0358775f95
Author: Thomas D'Silva 
AuthorDate: Thu Jun 6 15:19:03 2019 -0700

PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary 
keys that are prefixes of each other
---
 .../apache/phoenix/end2end/SkipScanQueryIT.java| 71 ++
 .../org/apache/phoenix/compile/ScanRanges.java |  6 +-
 .../java/org/apache/phoenix/query/KeyRange.java| 29 +
 3 files changed, 103 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index fb0b568..f66f196 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -29,13 +29,20 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.filter.SkipScanFilter;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.util.TestUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -584,4 +591,68 @@ public class SkipScanQueryIT extends 
ParallelStatsDisabledIT {
 assertTrue(rs.next());
 }
 }
+
+@Test
+public void testOrWithMixedOrderPKs() throws Exception {
+String tableName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+conn.setAutoCommit(true);
+Statement stmt = conn.createStatement();
+
+stmt.execute("CREATE TABLE " + tableName +
+" (COL1 VARCHAR, COL2 VARCHAR CONSTRAINT PK PRIMARY KEY (COL1 DESC, COL2)) ");
+
+// this is the order the rows will be stored on disk
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('8', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('6', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('23', 'b')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('23', 'bb')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('2', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('17', 'a')");
+
+
+// test values in the skip scan filter which are prefixes of another value, eg 1,12 and 2,23
+String sql = "select COL1, COL2 from " + tableName + " where COL1='1' OR COL1='2' OR COL1='3' OR COL1='4' " +
+"OR COL1='5' OR COL1='6' OR COL1='8' OR COL1='17' OR COL1='12' OR COL1='23'";
+
+ResultSet rs = stmt.executeQuery(sql);
+assertTrue(rs.next());
+
+QueryPlan plan = stmt.unwrap(PhoenixStatement.class).getQueryPlan();
+assertEquals("Expected a single scan ", 1, plan.getScans().size());
+assertEquals("Expected a single scan ", 1, plan.getScans().get(0).size());
+Scan scan = plan.getScans().get(0).get(0);
+FilterList filterList = (FilterList)scan.getFilter();
+boolean skipScanFilterFound = false;
+for (Filter f : filterList.getFilters()) {
+if (f instanceof SkipScanFilter) {
+skipScanFilterFound = true;
+SkipScanFilter skipScanFilter = (SkipScanFilter) f;
+assertEquals("Expected a single slot ", skipScanFilter.getSlots().size(), 1);
+assertEquals("Number of key ranges should match number of or filters ",
+skipScanFilter.getSlots().get(0).size(), 10);
+}
+}
+assertTrue("Should use skip scan filter", skipScanFilterFound);
+
+   
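The test above also doubles as a recipe for checking which filter Phoenix compiled for a given query: execute the statement, unwrap it to a PhoenixStatement, and walk the QueryPlan's scans. A minimal, hedged sketch of that inspection outside the test harness follows; the JDBC URL (jdbc:phoenix:localhost) and the table name T are placeholders rather than anything from this commit, and it assumes the plan produced a single scan whose filter is a FilterList, as the test asserts.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.phoenix.compile.QueryPlan;
    import org.apache.phoenix.filter.SkipScanFilter;
    import org.apache.phoenix.jdbc.PhoenixStatement;

    public class InspectSkipScan {
        public static void main(String[] args) throws Exception {
            // Placeholder URL and table name; point these at a real cluster and schema.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                ResultSet rs = stmt.executeQuery(
                        "SELECT COL1, COL2 FROM T WHERE COL1 = '2' OR COL1 = '17' OR COL1 = '23'");
                rs.next();
                // Same pattern as the test: unwrap the JDBC statement to reach the compiled plan.
                QueryPlan plan = stmt.unwrap(PhoenixStatement.class).getQueryPlan();
                Scan scan = plan.getScans().get(0).get(0);
                Filter filter = scan.getFilter();
                if (filter instanceof FilterList) {
                    for (Filter f : ((FilterList) filter).getFilters()) {
                        if (f instanceof SkipScanFilter) {
                            SkipScanFilter skipScan = (SkipScanFilter) f;
                            // In the test there is one slot (COL1) holding one key range per OR'd value.
                            System.out.println("slots=" + skipScan.getSlots().size()
                                    + ", ranges in first slot=" + skipScan.getSlots().get(0).size());
                        }
                    }
                }
            }
        }
    }

With the fix, the key ranges in that single slot line up one to one with the OR'd values even when one DESC key value is a prefix of another (for example '2' and '23'), which is exactly what the assertions above pin down.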

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary keys that are prefixes of each other

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 0513cd7  PHOENIX-5318 Slots passed to SkipScan filter is incorrect for 
desc primary keys that are prefixes of each other
0513cd7 is described below

commit 0513cd7254fc93380ee8539e1fbd9e8af3e1aa7f
Author: Thomas D'Silva 
AuthorDate: Thu Jun 6 15:19:03 2019 -0700

PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary 
keys that are prefixes of each other
---
 .../apache/phoenix/end2end/SkipScanQueryIT.java| 71 ++
 .../org/apache/phoenix/compile/ScanRanges.java |  6 +-
 .../java/org/apache/phoenix/query/KeyRange.java| 29 +
 3 files changed, 103 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index fb0b568..f66f196 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -29,13 +29,20 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.filter.SkipScanFilter;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.util.TestUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -584,4 +591,68 @@ public class SkipScanQueryIT extends ParallelStatsDisabledIT {
 assertTrue(rs.next());
 }
 }
+
+@Test
+public void testOrWithMixedOrderPKs() throws Exception {
+String tableName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+conn.setAutoCommit(true);
+Statement stmt = conn.createStatement();
+
+stmt.execute("CREATE TABLE " + tableName +
+" (COL1 VARCHAR, COL2 VARCHAR CONSTRAINT PK PRIMARY KEY (COL1 DESC, COL2)) ");
+
+// this is the order the rows will be stored on disk
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('8', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('6', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('23', 'b')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('23', 'bb')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('2', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('17', 'a')");
+
+
+// test values in the skip scan filter which are prefixes of another value, eg 1,12 and 2,23
+String sql = "select COL1, COL2 from " + tableName + " where COL1='1' OR COL1='2' OR COL1='3' OR COL1='4' " +
+"OR COL1='5' OR COL1='6' OR COL1='8' OR COL1='17' OR COL1='12' OR COL1='23'";
+
+ResultSet rs = stmt.executeQuery(sql);
+assertTrue(rs.next());
+
+QueryPlan plan = stmt.unwrap(PhoenixStatement.class).getQueryPlan();
+assertEquals("Expected a single scan ", 1, plan.getScans().size());
+assertEquals("Expected a single scan ", 1, plan.getScans().get(0).size());
+Scan scan = plan.getScans().get(0).get(0);
+FilterList filterList = (FilterList)scan.getFilter();
+boolean skipScanFilterFound = false;
+for (Filter f : filterList.getFilters()) {
+if (f instanceof SkipScanFilter) {
+skipScanFilterFound = true;
+SkipScanFilter skipScanFilter = (SkipScanFilter) f;
+assertEquals("Expected a single slot ", skipScanFilter.getSlots().size(), 1);
+assertEquals("Number of key ranges should match number of or filters ",
+skipScanFilter.getSlots().get(0).size(), 10);
+}
+}
+assertTrue("Should use skip scan filter", skipScanFilterFound);
+
+   

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary keys that are prefixes of each other

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 237ad8b  PHOENIX-5318 Slots passed to SkipScan filter is incorrect for 
desc primary keys that are prefixes of each other
237ad8b is described below

commit 237ad8b480f785eeab33598e9de39a5b8cf23962
Author: Thomas D'Silva 
AuthorDate: Thu Jun 6 15:19:03 2019 -0700

PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary 
keys that are prefixes of each other
---
 .../apache/phoenix/end2end/SkipScanQueryIT.java| 71 ++
 .../org/apache/phoenix/compile/ScanRanges.java |  6 +-
 .../java/org/apache/phoenix/query/KeyRange.java| 29 +
 3 files changed, 103 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index fb0b568..f66f196 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -29,13 +29,20 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.filter.SkipScanFilter;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.util.TestUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -584,4 +591,68 @@ public class SkipScanQueryIT extends ParallelStatsDisabledIT {
 assertTrue(rs.next());
 }
 }
+
+@Test
+public void testOrWithMixedOrderPKs() throws Exception {
+String tableName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+conn.setAutoCommit(true);
+Statement stmt = conn.createStatement();
+
+stmt.execute("CREATE TABLE " + tableName +
+" (COL1 VARCHAR, COL2 VARCHAR CONSTRAINT PK PRIMARY KEY (COL1 DESC, COL2)) ");
+
+// this is the order the rows will be stored on disk
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('8', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('6', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('23', 'b')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('23', 'bb')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('2', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('17', 'a')");
+
+
+// test values in the skip scan filter which are prefixes of another value, eg 1,12 and 2,23
+String sql = "select COL1, COL2 from " + tableName + " where COL1='1' OR COL1='2' OR COL1='3' OR COL1='4' " +
+"OR COL1='5' OR COL1='6' OR COL1='8' OR COL1='17' OR COL1='12' OR COL1='23'";
+
+ResultSet rs = stmt.executeQuery(sql);
+assertTrue(rs.next());
+
+QueryPlan plan = stmt.unwrap(PhoenixStatement.class).getQueryPlan();
+assertEquals("Expected a single scan ", 1, plan.getScans().size());
+assertEquals("Expected a single scan ", 1, plan.getScans().get(0).size());
+Scan scan = plan.getScans().get(0).get(0);
+FilterList filterList = (FilterList)scan.getFilter();
+boolean skipScanFilterFound = false;
+for (Filter f : filterList.getFilters()) {
+if (f instanceof SkipScanFilter) {
+skipScanFilterFound = true;
+SkipScanFilter skipScanFilter = (SkipScanFilter) f;
+assertEquals("Expected a single slot ", skipScanFilter.getSlots().size(), 1);
+assertEquals("Number of key ranges should match number of or filters ",
+skipScanFilter.getSlots().get(0).size(), 10);
+}
+}
+assertTrue("Should use skip scan filter", skipScanFilterFound);
+
+   

[phoenix] branch master updated: PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary keys that are prefixes of each other

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 2860a71  PHOENIX-5318 Slots passed to SkipScan filter is incorrect for 
desc primary keys that are prefixes of each other
2860a71 is described below

commit 2860a717ffd8f4e5993a33a5f82124fca0c3b4c2
Author: Thomas D'Silva 
AuthorDate: Thu Jun 6 15:19:03 2019 -0700

PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary 
keys that are prefixes of each other
---
 .../apache/phoenix/end2end/SkipScanQueryIT.java| 71 ++
 .../org/apache/phoenix/compile/ScanRanges.java |  6 +-
 .../java/org/apache/phoenix/query/KeyRange.java| 29 +
 3 files changed, 103 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index e823559..5a6bc23 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -29,6 +29,7 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -36,7 +37,13 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.filter.SkipScanFilter;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.util.TestUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -585,4 +592,68 @@ public class SkipScanQueryIT extends ParallelStatsDisabledIT {
 assertTrue(rs.next());
 }
 }
+
+@Test
+public void testOrWithMixedOrderPKs() throws Exception {
+String tableName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+conn.setAutoCommit(true);
+Statement stmt = conn.createStatement();
+
+stmt.execute("CREATE TABLE " + tableName +
+" (COL1 VARCHAR, COL2 VARCHAR CONSTRAINT PK PRIMARY KEY (COL1 DESC, COL2)) ");
+
+// this is the order the rows will be stored on disk
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('8', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('6', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('23', 'b')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('23', 'bb')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('2', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('17', 'a')");
+
+
+// test values in the skip scan filter which are prefixes of another value, eg 1,12 and 2,23
+String sql = "select COL1, COL2 from " + tableName + " where COL1='1' OR COL1='2' OR COL1='3' OR COL1='4' " +
+"OR COL1='5' OR COL1='6' OR COL1='8' OR COL1='17' OR COL1='12' OR COL1='23'";
+
+ResultSet rs = stmt.executeQuery(sql);
+assertTrue(rs.next());
+
+QueryPlan plan = stmt.unwrap(PhoenixStatement.class).getQueryPlan();
+assertEquals("Expected a single scan ", 1, plan.getScans().size());
+assertEquals("Expected a single scan ", 1, plan.getScans().get(0).size());
+Scan scan = plan.getScans().get(0).get(0);
+FilterList filterList = (FilterList)scan.getFilter();
+boolean skipScanFilterFound = false;
+for (Filter f : filterList.getFilters()) {
+if (f instanceof SkipScanFilter) {
+skipScanFilterFound = true;
+SkipScanFilter skipScanFilter = (SkipScanFilter) f;
+assertEquals("Expected a single slot ", skipScanFilter.getSlots().size(), 1);
+assertEquals("Number of key ranges should match number of or filters ",
+skipScanFilter.getSlots().get(0).size(), 10);
+}
+}
+assertTrue("Should use skip scan filter", skipScanFilterFound);

[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary keys that are prefixes of each other

2019-06-07 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 1a33f38  PHOENIX-5318 Slots passed to SkipScan filter is incorrect for 
desc primary keys that are prefixes of each other
1a33f38 is described below

commit 1a33f38150bbda9fa16e3574ac89c4f47743dbf9
Author: Thomas D'Silva 
AuthorDate: Thu Jun 6 15:19:03 2019 -0700

PHOENIX-5318 Slots passed to SkipScan filter is incorrect for desc primary 
keys that are prefixes of each other
---
 .../apache/phoenix/end2end/SkipScanQueryIT.java| 71 ++
 .../org/apache/phoenix/compile/ScanRanges.java |  6 +-
 .../java/org/apache/phoenix/query/KeyRange.java| 29 +
 3 files changed, 103 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index fb0b568..f66f196 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -29,13 +29,20 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.QueryPlan;
+import org.apache.phoenix.filter.SkipScanFilter;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.util.TestUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -584,4 +591,68 @@ public class SkipScanQueryIT extends ParallelStatsDisabledIT {
 assertTrue(rs.next());
 }
 }
+
+@Test
+public void testOrWithMixedOrderPKs() throws Exception {
+String tableName = generateUniqueName();
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+conn.setAutoCommit(true);
+Statement stmt = conn.createStatement();
+
+stmt.execute("CREATE TABLE " + tableName +
+" (COL1 VARCHAR, COL2 VARCHAR CONSTRAINT PK PRIMARY KEY (COL1 DESC, COL2)) ");
+
+// this is the order the rows will be stored on disk
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('8', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('6', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('23', 'b')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('23', 'bb')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('2', 'a')");
+stmt.execute("UPSERT INTO " + tableName + " (COL1, COL2) VALUES ('17', 'a')");
+
+
+// test values in the skip scan filter which are prefixes of another value, eg 1,12 and 2,23
+String sql = "select COL1, COL2 from " + tableName + " where COL1='1' OR COL1='2' OR COL1='3' OR COL1='4' " +
+"OR COL1='5' OR COL1='6' OR COL1='8' OR COL1='17' OR COL1='12' OR COL1='23'";
+
+ResultSet rs = stmt.executeQuery(sql);
+assertTrue(rs.next());
+
+QueryPlan plan = stmt.unwrap(PhoenixStatement.class).getQueryPlan();
+assertEquals("Expected a single scan ", 1, plan.getScans().size());
+assertEquals("Expected a single scan ", 1, plan.getScans().get(0).size());
+Scan scan = plan.getScans().get(0).get(0);
+FilterList filterList = (FilterList)scan.getFilter();
+boolean skipScanFilterFound = false;
+for (Filter f : filterList.getFilters()) {
+if (f instanceof SkipScanFilter) {
+skipScanFilterFound = true;
+SkipScanFilter skipScanFilter = (SkipScanFilter) f;
+assertEquals("Expected a single slot ", skipScanFilter.getSlots().size(), 1);
+assertEquals("Number of key ranges should match number of or filters ",
+skipScanFilter.getSlots().get(0).size(), 10);
+}
+}
+assertTrue("Should use skip scan filter", skipScanFilterFound);
+
+   

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions cause tests to fail

2019-06-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new bbf451f  PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions cause tests to fail
bbf451f is described below

commit bbf451f99062b752dd65c706d5c508168c382089
Author: Thomas D'Silva 
AuthorDate: Tue Jun 4 16:46:43 2019 -0700

    PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions cause tests to fail
---
 .../java/org/apache/phoenix/pherf/PherfMainIT.java | 17 +--
 .../main/java/org/apache/phoenix/pherf/Pherf.java  |  7 ++-
 .../pherf/configuration/DataTypeMapping.java   |  4 +-
 .../phoenix/pherf/configuration/Scenario.java  |  2 +
 .../pherf/configuration/XMLConfigParser.java   |  3 +-
 .../apache/phoenix/pherf/jmx/MonitorManager.java   |  9 ++--
 .../apache/phoenix/pherf/rules/RulesApplier.java   | 13 +++--
 .../pherf/workload/MultiThreadedRunner.java| 27 ++-
 .../pherf/workload/MultithreadedDiffer.java|  6 ++-
 .../phoenix/pherf/workload/QueryExecutor.java  | 56 ++
 .../apache/phoenix/pherf/workload/Workload.java|  4 +-
 .../phoenix/pherf/workload/WorkloadExecutor.java   |  5 +-
 .../phoenix/pherf/workload/WriteWorkload.java  | 53 
 .../scenario/prod_test_unsalted_scenario.xml   |  6 +--
 14 files changed, 129 insertions(+), 83 deletions(-)

diff --git a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
index 2407ef4..6dc900e 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -22,15 +22,24 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.contrib.java.lang.system.ExpectedSystemExit;
 
+import java.util.concurrent.Future;
+
 public class PherfMainIT extends ResultBaseTestIT {
 @Rule
 public final ExpectedSystemExit exit = ExpectedSystemExit.none();
 
 @Test
-public void testPherfMain() {
-String[] args = { "-q",
-"--scenarioFile", ".*prod_test_unsalted_scenario.*",
+public void testPherfMain() throws Exception {
+String[] args = { "-q", "-l",
+"--schemaFile", ".*create_prod_test_unsalted.sql",
+"--scenarioFile", ".*prod_test_unsalted_scenario.xml",
 "-m", "--monitorFrequency", "10" };
-Pherf.main(args);
+Pherf pherf = new Pherf(args);
+pherf.run();
+
+// verify that none of the scenarios threw any exceptions
+for (Future future : pherf.workloadExecutor.jobs.values()) {
+future.get();
+}
 }
 }
\ No newline at end of file
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index d92ffde..51d6743 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -24,6 +24,8 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 
+import com.google.common.annotations.VisibleForTesting;
+import jline.internal.TestAccessible;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -115,6 +117,9 @@ public class Pherf {
 private final boolean thinDriver;
 private final String queryServerUrl;
 
+@VisibleForTesting
+WorkloadExecutor workloadExecutor;
+
 public Pherf(String[] args) throws Exception {
 CommandLineParser parser = new PosixParser();
 CommandLine command = null;
@@ -201,7 +206,7 @@ public class Pherf {
 public void run() throws Exception {
 MonitorManager monitorManager = null;
 List workloads = new ArrayList<>();
-WorkloadExecutor workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
+workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
 try {
 if (listFiles) {
 ResourceList list = new ResourceList(PherfConstants.RESOURCE_DATAMODEL);
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
index 0476df2..129bdc2 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
@@ -29,7 +2
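The behavioural point behind PHOENIX-5316 is standard java.util.concurrent semantics: a Runnable cannot declare checked exceptions, and when handed to an executor with execute() a thrown failure only reaches the worker thread's uncaught-exception handler, whereas a Callable's call() may throw and the failure is rethrown, wrapped in ExecutionException, as soon as the caller invokes Future.get() on the submitted job. That is what the updated PherfMainIT leans on when it loops over workloadExecutor.jobs and calls get() on each Future. A small, self-contained sketch of the difference, independent of Pherf's own classes:

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    public class CallableVsRunnable {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newSingleThreadExecutor();
            try {
                // A Runnable that fails: execute() runs it, but the exception never reaches
                // this thread; it goes to the worker's uncaught-exception handler.
                pool.execute(new Runnable() {
                    @Override
                    public void run() {
                        throw new IllegalStateException("silently lost");
                    }
                });

                // A Callable that fails: the exception travels with the returned Future.
                Future<Void> job = pool.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {
                        throw new IllegalStateException("scenario failed");
                    }
                });
                try {
                    job.get(); // rethrows the failure, wrapped in ExecutionException
                } catch (ExecutionException e) {
                    System.out.println("surfaced: " + e.getCause());
                }
            } finally {
                pool.shutdown();
            }
        }
    }

This is why the test now keeps a handle on the workloadExecutor field and calls get() on every queued Future: a scenario that throws makes the test fail instead of passing silently.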

[phoenix] branch master updated: PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions cause tests to fail

2019-06-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 13fd777  PHOENIX-5316 Use callable instead of runnable so that Pherf 
exceptions cause tests to fail
13fd777 is described below

commit 13fd7776d7629c222af63d9a4f38a426fc5ed163
Author: Thomas D'Silva 
AuthorDate: Tue Jun 4 16:46:43 2019 -0700

PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions 
cause tests to fail
---
 .../java/org/apache/phoenix/pherf/PherfMainIT.java | 17 +--
 .../main/java/org/apache/phoenix/pherf/Pherf.java  |  7 ++-
 .../pherf/configuration/DataTypeMapping.java   |  4 +-
 .../phoenix/pherf/configuration/Scenario.java  |  2 +
 .../pherf/configuration/XMLConfigParser.java   |  3 +-
 .../apache/phoenix/pherf/jmx/MonitorManager.java   |  9 ++--
 .../apache/phoenix/pherf/rules/RulesApplier.java   | 13 +++--
 .../pherf/workload/MultiThreadedRunner.java| 27 ++-
 .../pherf/workload/MultithreadedDiffer.java|  6 ++-
 .../phoenix/pherf/workload/QueryExecutor.java  | 56 ++
 .../apache/phoenix/pherf/workload/Workload.java|  4 +-
 .../phoenix/pherf/workload/WorkloadExecutor.java   |  5 +-
 .../phoenix/pherf/workload/WriteWorkload.java  | 53 
 .../scenario/prod_test_unsalted_scenario.xml   |  6 +--
 14 files changed, 129 insertions(+), 83 deletions(-)

diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
index 2407ef4..6dc900e 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -22,15 +22,24 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.contrib.java.lang.system.ExpectedSystemExit;
 
+import java.util.concurrent.Future;
+
 public class PherfMainIT extends ResultBaseTestIT {
 @Rule
 public final ExpectedSystemExit exit = ExpectedSystemExit.none();
 
 @Test
-public void testPherfMain() {
-String[] args = { "-q",
-"--scenarioFile", ".*prod_test_unsalted_scenario.*",
+public void testPherfMain() throws Exception {
+String[] args = { "-q", "-l",
+"--schemaFile", ".*create_prod_test_unsalted.sql",
+"--scenarioFile", ".*prod_test_unsalted_scenario.xml",
 "-m", "--monitorFrequency", "10" };
-Pherf.main(args);
+Pherf pherf = new Pherf(args);
+pherf.run();
+
+// verify that none of the scenarios threw any exceptions
+for (Future future : pherf.workloadExecutor.jobs.values()) {
+future.get();
+}
 }
 }
\ No newline at end of file
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index d92ffde..51d6743 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -24,6 +24,8 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 
+import com.google.common.annotations.VisibleForTesting;
+import jline.internal.TestAccessible;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -115,6 +117,9 @@ public class Pherf {
 private final boolean thinDriver;
 private final String queryServerUrl;
 
+@VisibleForTesting
+WorkloadExecutor workloadExecutor;
+
 public Pherf(String[] args) throws Exception {
 CommandLineParser parser = new PosixParser();
 CommandLine command = null;
@@ -201,7 +206,7 @@ public class Pherf {
 public void run() throws Exception {
 MonitorManager monitorManager = null;
 List workloads = new ArrayList<>();
-WorkloadExecutor workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
+workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
 try {
 if (listFiles) {
 ResourceList list = new ResourceList(PherfConstants.RESOURCE_DATAMODEL);
diff --git 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
index 0476df2..129bdc2 100644
--- 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
+++ 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
@@ -29,7 +29,9 @@ public 

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions cause tests to fail

2019-06-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new a438558  PHOENIX-5316 Use callable instead of runnable so that Pherf 
exceptions cause tests to fail
a438558 is described below

commit a438558b9806195b62c1364a45898de0d3a42029
Author: Thomas D'Silva 
AuthorDate: Tue Jun 4 16:46:43 2019 -0700

PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions 
cause tests to fail
---
 .../java/org/apache/phoenix/pherf/PherfMainIT.java | 17 +--
 .../main/java/org/apache/phoenix/pherf/Pherf.java  |  7 ++-
 .../pherf/configuration/DataTypeMapping.java   |  4 +-
 .../phoenix/pherf/configuration/Scenario.java  |  2 +
 .../pherf/configuration/XMLConfigParser.java   |  3 +-
 .../apache/phoenix/pherf/jmx/MonitorManager.java   |  9 ++--
 .../apache/phoenix/pherf/rules/RulesApplier.java   | 13 +++--
 .../pherf/workload/MultiThreadedRunner.java| 27 ++-
 .../pherf/workload/MultithreadedDiffer.java|  6 ++-
 .../phoenix/pherf/workload/QueryExecutor.java  | 56 ++
 .../apache/phoenix/pherf/workload/Workload.java|  4 +-
 .../phoenix/pherf/workload/WorkloadExecutor.java   |  5 +-
 .../phoenix/pherf/workload/WriteWorkload.java  | 53 
 .../scenario/prod_test_unsalted_scenario.xml   |  6 +--
 14 files changed, 129 insertions(+), 83 deletions(-)

diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
index 2407ef4..6dc900e 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -22,15 +22,24 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.contrib.java.lang.system.ExpectedSystemExit;
 
+import java.util.concurrent.Future;
+
 public class PherfMainIT extends ResultBaseTestIT {
 @Rule
 public final ExpectedSystemExit exit = ExpectedSystemExit.none();
 
 @Test
-public void testPherfMain() {
-String[] args = { "-q",
-"--scenarioFile", ".*prod_test_unsalted_scenario.*",
+public void testPherfMain() throws Exception {
+String[] args = { "-q", "-l",
+"--schemaFile", ".*create_prod_test_unsalted.sql",
+"--scenarioFile", ".*prod_test_unsalted_scenario.xml",
 "-m", "--monitorFrequency", "10" };
-Pherf.main(args);
+Pherf pherf = new Pherf(args);
+pherf.run();
+
+// verify that none of the scenarios threw any exceptions
+for (Future future : pherf.workloadExecutor.jobs.values()) {
+future.get();
+}
 }
 }
\ No newline at end of file
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index d92ffde..51d6743 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -24,6 +24,8 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 
+import com.google.common.annotations.VisibleForTesting;
+import jline.internal.TestAccessible;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -115,6 +117,9 @@ public class Pherf {
 private final boolean thinDriver;
 private final String queryServerUrl;
 
+@VisibleForTesting
+WorkloadExecutor workloadExecutor;
+
 public Pherf(String[] args) throws Exception {
 CommandLineParser parser = new PosixParser();
 CommandLine command = null;
@@ -201,7 +206,7 @@ public class Pherf {
 public void run() throws Exception {
 MonitorManager monitorManager = null;
 List workloads = new ArrayList<>();
-WorkloadExecutor workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
+workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
 try {
 if (listFiles) {
 ResourceList list = new ResourceList(PherfConstants.RESOURCE_DATAMODEL);
diff --git 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
index 0476df2..129bdc2 100644
--- 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
+++ 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
@@ -29,7 +2

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions cause tests to fail

2019-06-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 2c98e22  PHOENIX-5316 Use callable instead of runnable so that Pherf 
exceptions cause tests to fail
2c98e22 is described below

commit 2c98e22ce7b2858d6d6a84593e50fa2291891817
Author: Thomas D'Silva 
AuthorDate: Tue Jun 4 16:46:43 2019 -0700

PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions 
cause tests to fail
---
 .../java/org/apache/phoenix/pherf/PherfMainIT.java | 17 +--
 .../main/java/org/apache/phoenix/pherf/Pherf.java  |  7 ++-
 .../pherf/configuration/DataTypeMapping.java   |  4 +-
 .../phoenix/pherf/configuration/Scenario.java  |  2 +
 .../pherf/configuration/XMLConfigParser.java   |  3 +-
 .../apache/phoenix/pherf/jmx/MonitorManager.java   |  9 ++--
 .../apache/phoenix/pherf/rules/RulesApplier.java   | 13 +++--
 .../pherf/workload/MultiThreadedRunner.java| 27 ++-
 .../pherf/workload/MultithreadedDiffer.java|  6 ++-
 .../phoenix/pherf/workload/QueryExecutor.java  | 56 ++
 .../apache/phoenix/pherf/workload/Workload.java|  4 +-
 .../phoenix/pherf/workload/WorkloadExecutor.java   |  5 +-
 .../phoenix/pherf/workload/WriteWorkload.java  | 53 
 .../scenario/prod_test_unsalted_scenario.xml   |  6 +--
 14 files changed, 129 insertions(+), 83 deletions(-)

diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
index 2407ef4..6dc900e 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -22,15 +22,24 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.contrib.java.lang.system.ExpectedSystemExit;
 
+import java.util.concurrent.Future;
+
 public class PherfMainIT extends ResultBaseTestIT {
 @Rule
 public final ExpectedSystemExit exit = ExpectedSystemExit.none();
 
 @Test
-public void testPherfMain() {
-String[] args = { "-q",
-"--scenarioFile", ".*prod_test_unsalted_scenario.*",
+public void testPherfMain() throws Exception {
+String[] args = { "-q", "-l",
+"--schemaFile", ".*create_prod_test_unsalted.sql",
+"--scenarioFile", ".*prod_test_unsalted_scenario.xml",
 "-m", "--monitorFrequency", "10" };
-Pherf.main(args);
+Pherf pherf = new Pherf(args);
+pherf.run();
+
+// verify that none of the scenarios threw any exceptions
+for (Future future : pherf.workloadExecutor.jobs.values()) {
+future.get();
+}
 }
 }
\ No newline at end of file
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index d92ffde..51d6743 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -24,6 +24,8 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 
+import com.google.common.annotations.VisibleForTesting;
+import jline.internal.TestAccessible;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -115,6 +117,9 @@ public class Pherf {
 private final boolean thinDriver;
 private final String queryServerUrl;
 
+@VisibleForTesting
+WorkloadExecutor workloadExecutor;
+
 public Pherf(String[] args) throws Exception {
 CommandLineParser parser = new PosixParser();
 CommandLine command = null;
@@ -201,7 +206,7 @@ public class Pherf {
 public void run() throws Exception {
 MonitorManager monitorManager = null;
 List workloads = new ArrayList<>();
-WorkloadExecutor workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
+workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
 try {
 if (listFiles) {
 ResourceList list = new ResourceList(PherfConstants.RESOURCE_DATAMODEL);
diff --git 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
index 0476df2..129bdc2 100644
--- 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
+++ 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
@@ -29,7 +2

[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions cause tests to fail

2019-06-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new b22e9a2  PHOENIX-5316 Use callable instead of runnable so that Pherf 
exceptions cause tests to fail
b22e9a2 is described below

commit b22e9a2d7464453383f416097841c541b727478c
Author: Thomas D'Silva 
AuthorDate: Tue Jun 4 16:46:43 2019 -0700

PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions 
cause tests to fail
---
 .../java/org/apache/phoenix/pherf/PherfMainIT.java | 17 +--
 .../main/java/org/apache/phoenix/pherf/Pherf.java  |  7 ++-
 .../pherf/configuration/DataTypeMapping.java   |  4 +-
 .../phoenix/pherf/configuration/Scenario.java  |  2 +
 .../pherf/configuration/XMLConfigParser.java   |  3 +-
 .../apache/phoenix/pherf/jmx/MonitorManager.java   |  9 ++--
 .../apache/phoenix/pherf/rules/RulesApplier.java   | 13 +++--
 .../pherf/workload/MultiThreadedRunner.java| 27 ++-
 .../pherf/workload/MultithreadedDiffer.java|  6 ++-
 .../phoenix/pherf/workload/QueryExecutor.java  | 56 ++
 .../apache/phoenix/pherf/workload/Workload.java|  4 +-
 .../phoenix/pherf/workload/WorkloadExecutor.java   |  5 +-
 .../phoenix/pherf/workload/WriteWorkload.java  | 53 
 .../scenario/prod_test_unsalted_scenario.xml   |  6 +--
 14 files changed, 129 insertions(+), 83 deletions(-)

diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
index 2407ef4..6dc900e 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -22,15 +22,24 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.contrib.java.lang.system.ExpectedSystemExit;
 
+import java.util.concurrent.Future;
+
 public class PherfMainIT extends ResultBaseTestIT {
 @Rule
 public final ExpectedSystemExit exit = ExpectedSystemExit.none();
 
 @Test
-public void testPherfMain() {
-String[] args = { "-q",
-"--scenarioFile", ".*prod_test_unsalted_scenario.*",
+public void testPherfMain() throws Exception {
+String[] args = { "-q", "-l",
+"--schemaFile", ".*create_prod_test_unsalted.sql",
+"--scenarioFile", ".*prod_test_unsalted_scenario.xml",
 "-m", "--monitorFrequency", "10" };
-Pherf.main(args);
+Pherf pherf = new Pherf(args);
+pherf.run();
+
+// verify that none of the scenarios threw any exceptions
+for (Future future : pherf.workloadExecutor.jobs.values()) {
+future.get();
+}
 }
 }
\ No newline at end of file
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index d92ffde..51d6743 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -24,6 +24,8 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 
+import com.google.common.annotations.VisibleForTesting;
+import jline.internal.TestAccessible;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -115,6 +117,9 @@ public class Pherf {
 private final boolean thinDriver;
 private final String queryServerUrl;
 
+@VisibleForTesting
+WorkloadExecutor workloadExecutor;
+
 public Pherf(String[] args) throws Exception {
 CommandLineParser parser = new PosixParser();
 CommandLine command = null;
@@ -201,7 +206,7 @@ public class Pherf {
 public void run() throws Exception {
 MonitorManager monitorManager = null;
 List workloads = new ArrayList<>();
-WorkloadExecutor workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
+workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
 try {
 if (listFiles) {
 ResourceList list = new ResourceList(PherfConstants.RESOURCE_DATAMODEL);
diff --git 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
index 0476df2..129bdc2 100644
--- 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
+++ 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
@@ -29,7 +2

[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions cause tests to fail

2019-06-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new 8512ece  PHOENIX-5316 Use callable instead of runnable so that Pherf 
exceptions cause tests to fail
8512ece is described below

commit 8512ecefd4370dfc4e3fc547de858ac8abe11984
Author: Thomas D'Silva 
AuthorDate: Tue Jun 4 16:46:43 2019 -0700

PHOENIX-5316 Use callable instead of runnable so that Pherf exceptions 
cause tests to fail
---
 .../java/org/apache/phoenix/pherf/PherfMainIT.java | 17 +--
 .../main/java/org/apache/phoenix/pherf/Pherf.java  |  7 ++-
 .../pherf/configuration/DataTypeMapping.java   |  4 +-
 .../phoenix/pherf/configuration/Scenario.java  |  2 +
 .../pherf/configuration/XMLConfigParser.java   |  3 +-
 .../apache/phoenix/pherf/jmx/MonitorManager.java   |  9 ++--
 .../apache/phoenix/pherf/rules/RulesApplier.java   | 13 +++--
 .../pherf/workload/MultiThreadedRunner.java| 27 ++-
 .../pherf/workload/MultithreadedDiffer.java|  6 ++-
 .../phoenix/pherf/workload/QueryExecutor.java  | 56 ++
 .../apache/phoenix/pherf/workload/Workload.java|  4 +-
 .../phoenix/pherf/workload/WorkloadExecutor.java   |  5 +-
 .../phoenix/pherf/workload/WriteWorkload.java  | 53 
 .../scenario/prod_test_unsalted_scenario.xml   |  6 +--
 14 files changed, 129 insertions(+), 83 deletions(-)

diff --git 
a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java 
b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
index 2407ef4..6dc900e 100644
--- a/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
+++ b/phoenix-pherf/src/it/java/org/apache/phoenix/pherf/PherfMainIT.java
@@ -22,15 +22,24 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.contrib.java.lang.system.ExpectedSystemExit;
 
+import java.util.concurrent.Future;
+
 public class PherfMainIT extends ResultBaseTestIT {
 @Rule
 public final ExpectedSystemExit exit = ExpectedSystemExit.none();
 
 @Test
-public void testPherfMain() {
-String[] args = { "-q",
-"--scenarioFile", ".*prod_test_unsalted_scenario.*",
+public void testPherfMain() throws Exception {
+String[] args = { "-q", "-l",
+"--schemaFile", ".*create_prod_test_unsalted.sql",
+"--scenarioFile", ".*prod_test_unsalted_scenario.xml",
 "-m", "--monitorFrequency", "10" };
-Pherf.main(args);
+Pherf pherf = new Pherf(args);
+pherf.run();
+
+// verify that none of the scenarios threw any exceptions
+for (Future future : pherf.workloadExecutor.jobs.values()) {
+future.get();
+}
 }
 }
\ No newline at end of file
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index d92ffde..51d6743 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -24,6 +24,8 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 
+import com.google.common.annotations.VisibleForTesting;
+import jline.internal.TestAccessible;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.HelpFormatter;
@@ -115,6 +117,9 @@ public class Pherf {
 private final boolean thinDriver;
 private final String queryServerUrl;
 
+@VisibleForTesting
+WorkloadExecutor workloadExecutor;
+
 public Pherf(String[] args) throws Exception {
 CommandLineParser parser = new PosixParser();
 CommandLine command = null;
@@ -201,7 +206,7 @@ public class Pherf {
 public void run() throws Exception {
 MonitorManager monitorManager = null;
 List workloads = new ArrayList<>();
-WorkloadExecutor workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
+workloadExecutor = new WorkloadExecutor(properties, workloads, !isFunctional);
 try {
 if (listFiles) {
 ResourceList list = new ResourceList(PherfConstants.RESOURCE_DATAMODEL);
diff --git 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
index 0476df2..129bdc2 100644
--- 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
+++ 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/DataTypeMapping.java
@@ -29,7 +2

svn commit: r1860423 - in /phoenix/site: publish/language/datatypes.html publish/language/functions.html publish/language/index.html publish/views.html source/src/site/markdown/views.md

2019-05-30 Thread tdsilva
Author: tdsilva
Date: Fri May 31 03:52:16 2019
New Revision: 1860423

URL: http://svn.apache.org/viewvc?rev=1860423&view=rev
Log:
PHOENIX-5306 Misleading statement in document (William Shen)

Modified:
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html
phoenix/site/publish/views.html
phoenix/site/source/src/site/markdown/views.md

Modified: phoenix/site/publish/language/datatypes.html
URL: http://svn.apache.org/viewvc/phoenix/site/publish/language/datatypes.html?rev=1860423&r1=1860422&r2=1860423&view=diff
==
--- phoenix/site/publish/language/datatypes.html (original)
+++ phoenix/site/publish/language/datatypes.html Fri May 31 03:52:16 2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/functions.html
URL: http://svn.apache.org/viewvc/phoenix/site/publish/language/functions.html?rev=1860423&r1=1860422&r2=1860423&view=diff
==
--- phoenix/site/publish/language/functions.html (original)
+++ phoenix/site/publish/language/functions.html Fri May 31 03:52:16 2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/index.html
URL: http://svn.apache.org/viewvc/phoenix/site/publish/language/index.html?rev=1860423&r1=1860422&r2=1860423&view=diff
==
--- phoenix/site/publish/language/index.html (original)
+++ phoenix/site/publish/language/index.html Fri May 31 03:52:16 2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/views.html
URL: http://svn.apache.org/viewvc/phoenix/site/publish/views.html?rev=1860423&r1=1860422&r2=1860423&view=diff
==
--- phoenix/site/publish/views.html (original)
+++ phoenix/site/publish/views.html Fri May 31 03:52:16 2019
@@ -1,7 +1,7 @@
 
 
 
 
@@ -165,7 +165,7 @@
 
  Views
  
-The standard SQL view syntax (with some limitations) is now supported by Phoenix to enable multiple virtual tables to all share the same underlying physical HBase table. This is especially important in HBase, as you cannot realistically expect to have more than perhaps up to a hundred physical tables and continue to get reasonable performance from HBase. 
+The standard SQL view syntax (with some limitations) is now supported by Phoenix to enable multiple virtual tables to all share the same underlying physical HBase table. This is important in HBase as there are limits to the number of Regions which HBase can manage. Limiting the number of tables can help limit the number of Regions in a cluster. 
 For example, given the following table definition that defines a base table to collect product metrics: 
  
  CREATE  TABLE product_metrics (
@@ -471,7 +471,7 @@ VALUES('John Doe', CURRENT_DATE(), NEXT


Back to 
top
-   Copyright 2018 http://www.apache.org;>Apache Software Foundation. All Rights 
Reserved.
+   Copyright 2019 http://www.apache.org;>Apache Software Foundation. All Rights 
Reserved.




Modified: phoenix/site/source/src/site/markdown/views.md
URL: http://svn.apache.org/viewvc/phoenix/site/source/src/site/markdown/views.md?rev=1860423&r1=1860422&r2=1860423&view=diff
==
--- phoenix/site/source/src/site/markdown/views.md (original)
+++ phoenix/site/source/src/site/markdown/views.md Fri May 31 03:52:16 2019
@@ -1,6 +1,6 @@
 #Views
 
-The standard SQL view syntax (with some limitations) is now supported by Phoenix to enable multiple virtual tables to all share the same underlying physical HBase table. This is especially important in HBase, as you cannot realistically expect to have more than perhaps up to a hundred physical tables and continue to get reasonable performance from HBase.
+The standard SQL view syntax (with some limitations) is now supported by Phoenix to enable multiple virtual tables to all share the same underlying physical HBase table. This is important in HBase as there are limits to the number of Regions which HBase can manage. Limiting the number of tables can help limit the number of Regions in a cluster.
 
 For example, given the following table definition that defines a base table to collect product metrics:
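(The product_metrics definition referenced above lives in the full views page and is not part of this diff. As a hedged, standalone illustration of the point the new wording makes, the sketch below creates one physical table and two views over it through JDBC, so all three names map onto a single underlying HBase table; the connection URL, table, and view names are invented for the example.)

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class ViewsShareOneTable {
        public static void main(String[] args) throws Exception {
            // Placeholder URL; point it at a real Phoenix cluster.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                // One physical HBase table backs everything below.
                stmt.execute("CREATE TABLE IF NOT EXISTS metrics ("
                        + " host VARCHAR NOT NULL, metric VARCHAR NOT NULL, val DOUBLE"
                        + " CONSTRAINT pk PRIMARY KEY (host, metric))");
                // Two virtual tables layered on the same physical table.
                stmt.execute("CREATE VIEW IF NOT EXISTS cpu_metrics AS"
                        + " SELECT * FROM metrics WHERE metric = 'cpu'");
                stmt.execute("CREATE VIEW IF NOT EXISTS mem_metrics AS"
                        + " SELECT * FROM metrics WHERE metric = 'mem'");
            }
        }
    }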
 




[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-5300 NoopStatisticsCollector shouldn't scan any rows (Rushabh Shah)

2019-05-30 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 28fb23f  PHOENIX-5300 NoopStatisticsCollector shouldn't scan any rows (Rushabh Shah)
28fb23f is described below

commit 28fb23fa9485b63c0184e92e7cf44d13b7494bd2
Author: Thomas D'Silva 
AuthorDate: Thu May 30 12:04:42 2019 -0700

PHOENIX-5300 NoopStatisticsCollector shouldn't scan any rows (Rushabh Shah)
---
 .../UngroupedAggregateRegionObserver.java  | 34 ++
 1 file changed, 34 insertions(+)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 72ee4a3..c10dd07 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -117,6 +117,7 @@ import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.ValueSchema.Field;
+import org.apache.phoenix.schema.stats.NoOpStatisticsCollector;
 import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker;
 import org.apache.phoenix.schema.stats.StatisticsCollector;
 import org.apache.phoenix.schema.stats.StatisticsCollectorFactory;
@@ -1148,6 +1149,39 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 
 private RegionScanner collectStats(final RegionScanner innerScanner, StatisticsCollector stats,
 final Region region, final Scan scan, Configuration config) throws IOException {
+if (stats instanceof  NoOpStatisticsCollector) {
+logger.info("UPDATE STATISTICS didn't run because stats is not enabled");
+
+return new BaseRegionScanner(innerScanner) {
+@Override
+public HRegionInfo getRegionInfo() {
+return region.getRegionInfo();
+}
+
+@Override
+public boolean isFilterDone() {
+return true;
+}
+
+@Override
+public void close() throws IOException {
+super.close();
+}
+
+@Override
+public boolean next(List results) throws IOException {
+byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(0));
+results.add(KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
+SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length));
+return false;
+}
+
+@Override
+public long getMaxResultSize() {
+return scan.getMaxResultSize();
+}
+};
+}
 StatsCollectionCallable callable =
 new StatsCollectionCallable(stats, region, innerScanner, config, scan);
 byte[] asyncBytes = scan.getAttribute(BaseScannerRegionObserver.RUN_UPDATE_STATS_ASYNC_ATTRIB);
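From the client side, the code path above is reached through Phoenix's UPDATE STATISTICS command. A hedged JDBC sketch of issuing it follows; the URL and table name are placeholders, and the printed count simply reflects whatever row count the region scanners report, which this patch forces to zero when statistics collection is disabled on the server.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class UpdateStatsClient {
        public static void main(String[] args) throws Exception {
            // Placeholder URL and table name; adjust for a real cluster.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                // Triggers the collectStats() coprocessor hook shown in the diff above.
                int rows = stmt.executeUpdate("UPDATE STATISTICS MY_TABLE");
                // With a NoOpStatisticsCollector on the server, no data rows are scanned
                // and the reported count should come back as 0.
                System.out.println("rows reported: " + rows);
            }
        }
    }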



[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-5300 NoopStatisticsCollector shouldn't scan any rows (Rushabh Shah)

2019-05-30 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new 4223aff  PHOENIX-5300 NoopStatisticsCollector shouldn't scan any rows 
(Rushabh Shah)
4223aff is described below

commit 4223affeb66179ca19ae23aa6898c4f91cbe5d45
Author: Thomas D'Silva 
AuthorDate: Thu May 30 12:04:42 2019 -0700

PHOENIX-5300 NoopStatisticsCollector shouldn't scan any rows (Rushabh Shah)
---
 .../UngroupedAggregateRegionObserver.java  | 34 ++
 1 file changed, 34 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 72ee4a3..c10dd07 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -117,6 +117,7 @@ import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.ValueSchema.Field;
+import org.apache.phoenix.schema.stats.NoOpStatisticsCollector;
 import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker;
 import org.apache.phoenix.schema.stats.StatisticsCollector;
 import org.apache.phoenix.schema.stats.StatisticsCollectorFactory;
@@ -1148,6 +1149,39 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 
 private RegionScanner collectStats(final RegionScanner innerScanner, StatisticsCollector stats,
 final Region region, final Scan scan, Configuration config) throws IOException {
+if (stats instanceof  NoOpStatisticsCollector) {
+logger.info("UPDATE STATISTICS didn't run because stats is not enabled");
+
+return new BaseRegionScanner(innerScanner) {
+@Override
+public HRegionInfo getRegionInfo() {
+return region.getRegionInfo();
+}
+
+@Override
+public boolean isFilterDone() {
+return true;
+}
+
+@Override
+public void close() throws IOException {
+super.close();
+}
+
+@Override
+public boolean next(List results) throws IOException {
+byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(0));
+results.add(KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
+SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length));
+return false;
+}
+
+@Override
+public long getMaxResultSize() {
+return scan.getMaxResultSize();
+}
+};
+}
 StatsCollectionCallable callable =
 new StatsCollectionCallable(stats, region, innerScanner, config, scan);
 byte[] asyncBytes = scan.getAttribute(BaseScannerRegionObserver.RUN_UPDATE_STATS_ASYNC_ATTRIB);



svn commit: r1860306 - in /phoenix/site: publish/release.html source/src/site/markdown/release.md

2019-05-28 Thread tdsilva
Author: tdsilva
Date: Tue May 28 22:20:19 2019
New Revision: 1860306

URL: http://svn.apache.org/viewvc?rev=1860306&view=rev
Log:
Fix typo

Modified:
phoenix/site/publish/release.html
phoenix/site/source/src/site/markdown/release.md

Modified: phoenix/site/publish/release.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/release.html?rev=1860306&r1=1860305&r2=1860306&view=diff
==
--- phoenix/site/publish/release.html (original)
+++ phoenix/site/publish/release.html Tue May 28 22:20:19 2019
@@ -212,7 +212,7 @@ git tag -a v4.11.0-HBase-0.98 v4.11.0-HB
  
 
   Remove any obsolete releases on https://dist.apache.org/repos/dist/release/phoenix;>https://dist.apache.org/repos/dist/release/phoenix
 given the current release. 
-   Ensure you ~/.m2/settings.xml is setup correctly:  
+   Ensure your ~/.m2/settings.xml is setup correctly:  
 
 <server>
   <id>apache.releases.https</id>

Modified: phoenix/site/source/src/site/markdown/release.md
URL: 
http://svn.apache.org/viewvc/phoenix/site/source/src/site/markdown/release.md?rev=1860306&r1=1860305&r2=1860306&view=diff
==
--- phoenix/site/source/src/site/markdown/release.md (original)
+++ phoenix/site/source/src/site/markdown/release.md Tue May 28 22:20:19 2019
@@ -35,7 +35,7 @@ Follow the instructions. Signed binary a
 
 3. Remove any obsolete releases on 
https://dist.apache.org/repos/dist/release/phoenix given the current release.
 
-4. Ensure you ~/.m2/settings.xml is setup correctly: 
+4. Ensure your ~/.m2/settings.xml is setup correctly: 
 
 ```





svn commit: r1860305 - in /phoenix/site: publish/release.html source/src/site/markdown/release.md

2019-05-28 Thread tdsilva
Author: tdsilva
Date: Tue May 28 22:19:29 2019
New Revision: 1860305

URL: http://svn.apache.org/viewvc?rev=1860305&view=rev
Log:
Update how to release to include instructions on ~/.m2/settings.xml

Modified:
phoenix/site/publish/release.html
phoenix/site/source/src/site/markdown/release.md

Modified: phoenix/site/publish/release.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/release.html?rev=1860305&r1=1860304&r2=1860305&view=diff
==
--- phoenix/site/publish/release.html (original)
+++ phoenix/site/publish/release.html Tue May 28 22:19:29 2019
@@ -1,7 +1,7 @@
 
 
 
 
@@ -212,6 +212,15 @@ git tag -a v4.11.0-HBase-0.98 v4.11.0-HB
  
 
   Remove any obsolete releases on https://dist.apache.org/repos/dist/release/phoenix;>https://dist.apache.org/repos/dist/release/phoenix
 given the current release. 
+   Ensure you ~/.m2/settings.xml is setup correctly:  
+
+<server>
+  <id>apache.releases.https</id>
+  <username><!-- YOUR APACHE USERNAME --></username>
+  <password><!-- YOUR APACHE PASSWORD --></password>
+</server>
+ 
+
Release to maven (remove release directory from local repro if 
present):  
 
 
@@ -480,7 +489,7 @@ mvn versions:set -DnewVersion=4.12.0-HBa


Back to 
top
-   Copyright 2018 http://www.apache.org;>Apache Software Foundation. All Rights 
Reserved.
+   Copyright 2019 http://www.apache.org;>Apache Software Foundation. All Rights 
Reserved.




Modified: phoenix/site/source/src/site/markdown/release.md
URL: 
http://svn.apache.org/viewvc/phoenix/site/source/src/site/markdown/release.md?rev=1860305&r1=1860304&r2=1860305&view=diff
==
--- phoenix/site/source/src/site/markdown/release.md (original)
+++ phoenix/site/source/src/site/markdown/release.md Tue May 28 22:19:29 2019
@@ -35,15 +35,25 @@ Follow the instructions. Signed binary a
 
 3. Remove any obsolete releases on 
https://dist.apache.org/repos/dist/release/phoenix given the current release.
 
-4. Release to maven (remove release directory from local repro if present): 
+4. Ensure you ~/.m2/settings.xml is setup correctly: 
+
+```
+<server>
+  <id>apache.releases.https</id>
+  <username><!-- YOUR APACHE USERNAME --></username>
+  <password><!-- YOUR APACHE PASSWORD --></password>
+</server>
+```
+
+5. Release to maven (remove release directory from local repro if present): 
 
 
 mvn clean deploy gpg:sign -DperformRelease=true 
-Dgpg.passphrase=[your_pass_phrase_here]
 -Dgpg.keyname=[your_key_here] -DskipTests -P release -pl 
phoenix-core,phoenix-pig,phoenix-tracing-webapp,
 
phoenix-queryserver,phoenix-spark,phoenix-flume,phoenix-pherf,phoenix-queryserver-client,phoenix-hive,phoenix-client,phoenix-server
 -am
 
-5. Go to https://repository.apache.org/#stagingRepositories and 
close -> release the staged artifacts.
-6. Set version back to upcoming SNAPSHOT and commit: 
+6. Go to https://repository.apache.org/#stagingRepositories and 
close -> release the staged artifacts.
+7. Set version back to upcoming SNAPSHOT and commit: 
 
 
 mvn versions:set -DnewVersion=4.12.0-HBase-0.98-SNAPSHOT 
-DgenerateBackupPoms=false




svn commit: r1860304 - in /phoenix/site: publish/download.html source/src/site/markdown/download.md

2019-05-28 Thread tdsilva
Author: tdsilva
Date: Tue May 28 22:08:40 2019
New Revision: 1860304

URL: http://svn.apache.org/viewvc?rev=1860304&view=rev
Log:
Fix typo

Modified:
phoenix/site/publish/download.html
phoenix/site/source/src/site/markdown/download.md

Modified: phoenix/site/publish/download.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/download.html?rev=1860304&r1=1860303&r2=1860304&view=diff
==
--- phoenix/site/publish/download.html (original)
+++ phoenix/site/publish/download.html Tue May 28 22:08:40 2019
@@ -183,8 +183,8 @@
 

svn commit: r1860303 - in /phoenix/site: publish/download.html publish/language/datatypes.html publish/language/functions.html publish/language/index.html source/src/site/markdown/download.md

2019-05-28 Thread tdsilva
Author: tdsilva
Date: Tue May 28 22:07:43 2019
New Revision: 1860303

URL: http://svn.apache.org/viewvc?rev=1860303&view=rev
Log:
Update download page for 4.14.2 release

Modified:
phoenix/site/publish/download.html
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html
phoenix/site/source/src/site/markdown/download.md

Modified: phoenix/site/publish/download.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/download.html?rev=1860303&r1=1860302&r2=1860303&view=diff
==
--- phoenix/site/publish/download.html (original)
+++ phoenix/site/publish/download.html Tue May 28 22:07:43 2019
@@ -1,7 +1,7 @@
 
 
 
 
@@ -166,7 +166,7 @@
  Phoenix Downloads
  
 The below table lists mirrored release artifacts and their associated 
hashes and signatures available ONLY at apache.org. The keys used to sign 
releases can be found in our published https://www.apache.org/dist/phoenix/KEYS;>KEYS file. See our 
installation instructions here, our release 
notes here, and a list of fixes and new 
features https://issues.apache.org/jira/secure/ReleaseNote.jspa?version=12334393projectId=12315120;>here.
 Follow https://www.apache.org/dyn/closer.cgi#verify;>Verify the Integrity of the 
Files for how to verify your mirrored downloads. 
-Current release 4.14.1 can run on Apache HBase 0.98, 1.1, 1.2, 1.3 and 1.4. 
CDH HBase 5.11, 5.12, 5.13 and 5.14 is supported by 4.14.0. Apache HBase 2.0 is 
supported by 5.0.0. Please follow the appropriate link depending on your HBase 
version.  
+Current release 4.14.2 can run on Apache HBase 1.3 and 1.4. CDH HBase 5.11, 
5.12, 5.13 and 5.14 is supported by 4.14.0. Apache HBase 2.0 is supported by 
5.0.0. Please follow the appropriate link depending on your HBase version.  
  
   

@@ -183,11 +183,8 @@
 

[phoenix] annotated tag v4.14.2-HBase-1.4 created (now 54d12ee)

2019-05-28 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to annotated tag v4.14.2-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at 54d12ee  (tag)
 tagging 473ed83771f64138fec219346fc6c214f98367b3 (tag)
  length 169 bytes
  by Thomas D'Silva
  on Tue May 28 14:50:53 2019 -0700

- Log -
Phoenix v4.14.2-HBase-1.4 release
---

No new revisions were added by this update.



[phoenix] branch 4.14-HBase-1.3 updated: Set version to 4.14.3-HBase-1.3-SNAPSHOT

2019-05-28 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new 3573fc2  Set version to 4.14.3-HBase-1.3-SNAPSHOT
3573fc2 is described below

commit 3573fc2ab3ef44633902f501ae4be5d81581711f
Author: Thomas D'Silva 
AuthorDate: Tue May 28 14:48:12 2019 -0700

Set version to 4.14.3-HBase-1.3-SNAPSHOT
---
 phoenix-assembly/pom.xml   | 2 +-
 phoenix-client/pom.xml | 2 +-
 phoenix-core/pom.xml   | 2 +-
 phoenix-flume/pom.xml  | 2 +-
 phoenix-hive/pom.xml   | 2 +-
 phoenix-kafka/pom.xml  | 2 +-
 phoenix-load-balancer/pom.xml  | 2 +-
 phoenix-pherf/pom.xml  | 2 +-
 phoenix-pig/pom.xml| 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml| 2 +-
 phoenix-server/pom.xml | 2 +-
 phoenix-spark/pom.xml  | 2 +-
 phoenix-tracing-webapp/pom.xml | 2 +-
 pom.xml| 2 +-
 15 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 7f7d78f..c3f7689 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.2-HBase-1.3
+4.14.3-HBase-1.3-SNAPSHOT
   
   phoenix-assembly
   Phoenix Assembly
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index d0ec982..40e178f 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.2-HBase-1.3
+4.14.3-HBase-1.3-SNAPSHOT
   
   phoenix-client
   Phoenix Client
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 2734056..5267dcf 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.2-HBase-1.3
+4.14.3-HBase-1.3-SNAPSHOT
   
   phoenix-core
   Phoenix Core
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index dc62381..88bc4e1 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.2-HBase-1.3
+4.14.3-HBase-1.3-SNAPSHOT
   
   phoenix-flume
   Phoenix - Flume
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 2162e8c..66bec76 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.2-HBase-1.3
+4.14.3-HBase-1.3-SNAPSHOT
   
   phoenix-hive
   Phoenix - Hive
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index da89a8a..17c6c98 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@

org.apache.phoenix
phoenix
-   4.14.2-HBase-1.3
+   4.14.3-HBase-1.3-SNAPSHOT

phoenix-kafka
Phoenix - Kafka
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index df7a50b..4bd6c9d 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.2-HBase-1.3
+4.14.3-HBase-1.3-SNAPSHOT
   
   phoenix-load-balancer
   Phoenix Load Balancer
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index f5d570f..8ae2c3f 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7 @@

org.apache.phoenix
phoenix
-   4.14.2-HBase-1.3
+   4.14.3-HBase-1.3-SNAPSHOT

 
phoenix-pherf
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 80901ba..99e6c00 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.2-HBase-1.3
+4.14.3-HBase-1.3-SNAPSHOT
   
   phoenix-pig
   Phoenix - Pig
diff --git a/phoenix-queryserver-client/pom.xml 
b/phoenix-queryserver-client/pom.xml
index 07ec210..5ee6a1a 100644
--- a/phoenix-queryserver-client/pom.xml
+++ b/phoenix-queryserver-client/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.2-HBase-1.3
+4.14.3-HBase-1.3-SNAPSHOT
   
   phoenix-queryserver-client
   Phoenix Query Server Client
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index 2e3d567..a2a10da 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -26,7 +26,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.2-HBase-1.3
+4.14.3-HBase-1.3-SNAPSHOT
   
   phoenix-queryserver
   Phoenix Query Server
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 3263940..af16c55 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.2-HBase-1.3
+4.14.3-HBase-1.3-SNAPSHOT

svn commit: r34295 - /release/phoenix/KEYS

2019-05-28 Thread tdsilva
Author: tdsilva
Date: Tue May 28 19:26:08 2019
New Revision: 34295

Log:
Added key for tdsi...@apache.org


Modified:
release/phoenix/KEYS

Modified: release/phoenix/KEYS
==
--- release/phoenix/KEYS (original)
+++ release/phoenix/KEYS Tue May 28 19:26:08 2019
@@ -706,3 +706,61 @@ Zlf0WWO0W8ULaPmNd4XK/oHzxdyR7OPc0LM22VaL
 Fx6bhHulGA==
 =PgYX
 -END PGP PUBLIC KEY BLOCK-
+pub   4096R/30E0F400 2019-05-28
+uid  Thomas D'Silva (CODE SIGNING KEY) 
+sig 330E0F400 2019-05-28  Thomas D'Silva (CODE SIGNING KEY) 

+sub   4096R/7C9B246A 2019-05-28
+sig  30E0F400 2019-05-28  Thomas D'Silva (CODE SIGNING KEY) 

+
+-BEGIN PGP PUBLIC KEY BLOCK-
+Version: GnuPG v1
+
+mQINBFztfiEBEADHubvHSks7I7vTZy12GjnMagYJy/j89xE6n4g/OlU5qq8euzus
+N8slciKOr/zPCOzmhPBCmv6WUdBvI9+dZl6ZYq6C7cLTsTIvkzCZ+hxkZR4zE0r2
+aY8KL3SbzQapEppuqCZUE/sfqkBTypE8Gk0S3QRPb7LF52q69ukfnixlIB7to3jo
+nca+5GfkieNMO+dZc8/kB1CkWXkbymOKQVSBMWpD89aBF85wHwahmJLKIj+Qy3eZ
+GKgY4HG8Edv9BqwutEtIdk/wmuLUpZ4uhKzpzv1/rhiHENmXxBisples7DoruUT3
+pVmu7/0NznH2MeGGmocJOdUoxWLQ3odCu/qdc5GDJyEaM7Znn58Jhhb5/UMgJlIZ
+Gu/+imGp90nEWkhhc8WEVGdyvCsNKjOM9qFAjhWtO+RTMuVqtiW2hwaKXCyfnE0L
+0YSYF7qkyfVAfsAbCNtZ49p69QRYQdoxc2t5wa5NaF5CDWcmY5WEgMvBBijI3uca
+xSvD1Rn89da9F2AjcIbrnO7ErgltVA904G6fnGpTDQTZE7lRX/XV8JOb1iJjjyZh
+nPiC5vDd19CcFPpfvUqL7hMeNER/ENf2Cqyike9SwYUEgDQysUTELJ8EWwBKToLC
+72uz87zNv7irWcpxu+kWcOAno5JOYdfc4s5IpRNIeuNnGDa3ZC2t7invIQARAQAB
+tDZUaG9tYXMgRCdTaWx2YSAoQ09ERSBTSUdOSU5HIEtFWSkgPHRkc2lsdmFAYXBh
+Y2hlLm9yZz6JAjgEEwECACIFAlztfiECGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
+AheAAAoJEAjEhQsw4PQAzRMQAJZ0NfRIcfMX5ixWqoEUS3w8KK+NkAinqW3x8UZL
+K25afx37nT4ThVzrh9kXO3dXdbeolf9TtU1Y75qdj5N0wQQ8V/AlF2Dl7O1LtBva
+mEZMh5xlAEtf/nmNC7HqRCrm3Qyos1wCnYGyymsqdqcrs40BEmGoTGsF4FEAkzJj
+p/AQ01bxdI8/Uq6alQ7qZysz82uZdsOrfrpkV5kQw4MOMd0wanmuCVIcTFnTe/Z7
+bUogouZUoXrvrUooCLoKT1F4eMUUPmUP2QixElshPHXNVf7+Oq7ARL8znjVNvu0H
+EgeuyngUD6DnKZvctURsLcNC+dX2nzES8+BHqORWq7Mf3C/l7SyRBtSF1AnoCBsX
+50r5Ow3XmdouivqoH4RkPpSkanckG2Bu+Aqe1Wqut+APkEJMh7haWVQmsgEc2v/u
+zCRCOPMLBsUcsa4rwxVNAzcq0B4y+XZcJ3SZCZNEC7acVuZlbSw1BpVM/OaTQ4gr
+SKbJwCM64rJgEqh1HmV0nNJThpylFn7CEcwHVGOeSlsBtEcuWW/dtc7IpLk2ErnA
+1j9SKV1ZlhTYu+V/Cg0FIedrXpLzfpey8mQmpuLo7Y569rI0/hY7MttGeYHT3bIc
+TV7GwdwN4JXE8ssh/2vVaLeewvkxQ7pzA2kY+caNZDJRTbW1LZqAugXse5KdByii
+LYFguQINBFztfiEBEACrLPVgbMxpDuhhogGfw/PQmCZYj9R1MtAxLFZ+VZek9c8L
+QpmtBV59Vk+JSA/nKjlJ2ilPmK0ZuJU8zjWtlGXWJa4U11jwLDk9VeOex19jkUbL
+a+N5YzZY9LWlkG2o5srFQGyaUBI4uBNaAgJYeIteqg/p4vPs9v9iT/fxhBE1ZH6e
+P2/qpKqEC/YAO1c1bV+qDyB7jscfBbQpH2ooU0BCS4QDLKzO1y1pCtE+FTYgRqaF
+3MxEISfzODd8SLSUjFdVfqKoCBFEkylToTfaZ9UIUPF5NQdAC8joUEGyZQEIOW6y
+b/4/vkQXU6o3EGxG2AJUpOymKnsVhlFj8cRYcYi1UQmldH7TJjqDFN9MYsUoHVM4
+naVLaL9f3ZyVowktmNKdmt1HWN9erFGxraWtAEeL7WreuWOR1mOub1a3NlWmCxWZ
+CO9Whw68tWuLn78b8rlehtMw6/2YSOi4q232REGI8ePyFB0iAY4AYcEjQ/1Bu5HX
+XcI1LSQnTZFkOoWKC6ungdbZCKK8LX+CrZeT2W45GAOgrsyV6YFWpEOroAg/kQyW
+FC7TeIZ5cRBHkFyoQQ+YrSk05Bvh/itAI6oO4MCrLGDuDdFcQPOheBkPItlJz1rn
+Xjr03ry9oTDu0UHE2pXFIzvLJxey8u024G8MEgij4xVVDUf/RCJPbjmDEYXNXQAR
+AQABiQIfBBgBAgAJBQJc7X4hAhsMAAoJEAjEhQsw4PQAg7cQALyBjuoe652bVKO/
+kUYCAZS97K9/sRXbEUUuUksjfOpqvLFNitGxPtXe6i9ChYL8fvrRhR220soAoirT
+OVIKqMVLx45GMBJtUX2jpuq8vLcPzcar9dxL6S3gEXYPOkwiUeddZi0KBm6GyhU0
+o1V3Q5AZKNttZYSfv/jn4E3rrAQezySVUFj4Jag6hAwUUzS/nYxRql8HVRIEmpnH
+Fcu1N8ZLB0kwjisRh5xdCsTWjpHjast/Ybku9zGYIlQ15aFLtDk96bYj5tBUQ/lG
+LClJFAJFxyM/I64WO3B5cp5Rx+9AltMwr0E2TVQvOJKvlnpZAtbdHtS6EZPnQzz3
+5320J+IMn3//G/qpTcWOCbxdOLUvPqFUBpztwjMc+towpacgKxbKHSHXFGHI2PhR
+HMKD40aHYhGr/rM709GKUtXzdYNZVdN+QXxkcky/MUaK7yWAXVZYYyqpvlp2ohJK
+V35yHJDNvvLe/7gg2hAmqOYdHHCo1lQvCC0OIfDk2SqCcO5vrMFCLc5OPsa08TJF
+LsUWDlzZOzRxSIIYfxGBlu6JbTgsmVBvPOTDwZxMu5LNsfOP0+cyYbXIK2i/rZIO
+fZdnmTL0dHP4Grd0UV/LOn4xeDZo5V8cE1No6iD0hTZ1NBV5pks1t2YWomGN1/f5
+ZMxGKYlvAPKDQdShT3q0B1Z6F8bB
+=pnAa
+-END PGP PUBLIC KEY BLOCK-




svn commit: r34293 - /dev/phoenix/KEYS /release/phoenix/KEYS

2019-05-28 Thread tdsilva
Author: tdsilva
Date: Tue May 28 18:21:35 2019
New Revision: 34293

Log:
Move dev KEYS to release KEYS

Added:
release/phoenix/KEYS
  - copied unchanged from r34292, dev/phoenix/KEYS
Removed:
dev/phoenix/KEYS



svn commit: r34292 - /release/phoenix/KEYS

2019-05-28 Thread tdsilva
Author: tdsilva
Date: Tue May 28 18:20:53 2019
New Revision: 34292

Log:
Replace release KEYS with dev KEYS

Removed:
release/phoenix/KEYS



[phoenix] annotated tag v4.14.2-HBase-1.3 created (now 31dfa04)

2019-05-26 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to annotated tag v4.14.2-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at 31dfa04  (tag)
 tagging f08aa7d2c586335bb52f3efed940e9cee7f7e152 (tag)
  length 169 bytes
  by Thomas D'Silva
  on Sun May 26 21:06:50 2019 -0700

- Log -
Phoenix v4.14.2-HBase-1.3 release
---

No new revisions were added by this update.



svn commit: r34224 - /release/phoenix/apache-phoenix-4.14.1-HBase-1.2/

2019-05-26 Thread tdsilva
Author: tdsilva
Date: Mon May 27 04:02:10 2019
New Revision: 34224

Log:
Remove old release

Removed:
release/phoenix/apache-phoenix-4.14.1-HBase-1.2/



svn commit: r34225 - /release/phoenix/apache-phoenix-4.14.1-HBase-1.3/

2019-05-26 Thread tdsilva
Author: tdsilva
Date: Mon May 27 04:02:23 2019
New Revision: 34225

Log:
Remove old release

Removed:
release/phoenix/apache-phoenix-4.14.1-HBase-1.3/



svn commit: r34226 - /release/phoenix/apache-phoenix-4.14.1-HBase-1.4/

2019-05-26 Thread tdsilva
Author: tdsilva
Date: Mon May 27 04:02:33 2019
New Revision: 34226

Log:
Remove old release

Removed:
release/phoenix/apache-phoenix-4.14.1-HBase-1.4/



svn commit: r34222 - /release/phoenix/apache-phoenix-4.14.1-HBase-0.98/

2019-05-26 Thread tdsilva
Author: tdsilva
Date: Mon May 27 04:01:30 2019
New Revision: 34222

Log:
Remove old release


Removed:
release/phoenix/apache-phoenix-4.14.1-HBase-0.98/



svn commit: r34223 - /release/phoenix/apache-phoenix-4.14.1-HBase-1.1/

2019-05-26 Thread tdsilva
Author: tdsilva
Date: Mon May 27 04:01:55 2019
New Revision: 34223

Log:
Remove old release


Removed:
release/phoenix/apache-phoenix-4.14.1-HBase-1.1/



svn commit: r34221 - /dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/ /release/phoenix/apache-phoenix-4.14.2-HBase-1.4/

2019-05-26 Thread tdsilva
Author: tdsilva
Date: Mon May 27 04:00:04 2019
New Revision: 34221

Log:
phoenix-4.14.2-HBase-1.4 release 


Added:
release/phoenix/apache-phoenix-4.14.2-HBase-1.4/
  - copied from r34220, dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/
Removed:
dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/



svn commit: r34220 - /dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/ /release/phoenix/apache-phoenix-4.14.2-HBase-1.3/

2019-05-26 Thread tdsilva
Author: tdsilva
Date: Mon May 27 03:59:09 2019
New Revision: 34220

Log:
phoenix-4.14.2-HBase-1.3 release


Added:
release/phoenix/apache-phoenix-4.14.2-HBase-1.3/
  - copied from r34219, dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/
Removed:
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/



[phoenix] branch master updated: Add cdh branches and 4.x-HBase-1.5 to pre commit

2019-05-24 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 0d0efd4  Add cdh branches and 4.x-HBase-1.5 to pre commit
0d0efd4 is described below

commit 0d0efd467871582ce298d15891cce5adf50c92e4
Author: Thomas D'Silva 
AuthorDate: Fri May 24 15:39:20 2019 -0700

Add cdh branches and 4.x-HBase-1.5 to pre commit
---
 dev/test-patch.properties | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev/test-patch.properties b/dev/test-patch.properties
index e342a42..f82b1e8 100644
--- a/dev/test-patch.properties
+++ b/dev/test-patch.properties
@@ -27,7 +27,7 @@ MAX_LINE_LENGTH=100
 # All supported branches for testing with precommit build
 # be sure to consider branch name prefixes in the order, ie, 4.x should appear
 # before 4 since the latter is a prefix
-BRANCH_NAMES="4.x-HBase-1.2 4.x-HBase-1.3 4.x-HBase-1.4 4.14-HBase-1.4 master"
+BRANCH_NAMES="4.x-HBase-1.3 4.x-HBase-1.4 4.x-HBase-1.5 4.x-cdh5.11 
4.x-cdh5.11.2 4.x-cdh5.12 4.x-cdh5.13 4.x-cdh5.14 4.14-HBase-1.3 4.14-HBase-1.4 
4.14-cdh5.11 4.14-cdh5.12 4.14-cdh5.13 4.14-cdh5.14 master"
 
 
 # All supported Hadoop versions that we want to test the compilation with



svn commit: r34198 - in /dev/phoenix: apache-phoenix-4.14.2-HBase-1.3-rc2/ apache-phoenix-4.14.2-HBase-1.3-rc2/bin/ apache-phoenix-4.14.2-HBase-1.3-rc2/src/ apache-phoenix-4.14.2-HBase-1.4-rc2/ apache

2019-05-23 Thread tdsilva
Author: tdsilva
Date: Thu May 23 21:05:17 2019
New Revision: 34198

Log:
Phoenix 4.14.2 rc2

Added:
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz
   (with props)

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.asc

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha256

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha512
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/src/

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz
   (with props)

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz.asc

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz.sha256

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz.sha512
dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/
dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/bin/

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz
   (with props)

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz.asc

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz.sha256

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz.sha512
dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/src/

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz
   (with props)

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz.asc

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz.sha256

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc2/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz.sha512

Added: 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.asc
==
--- 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.asc
 (added)
+++ 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.asc
 Thu May 23 21:05:17 2019
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1
+
+iQIcBAABAgAGBQJc5wfgAAoJEGkcZhzf2GwCNRYP/jK59zzJ24LCo0Tt3FDC6iC0
+mnkJSxv77QWI+R/g8BWouVNGIZT/76kYoxJ4ECvZjFZMNSIEaeLVNzEYCJW/2yVW
+zC879W2Zifa406KzGRf/NuYzoP+aEXk5LcgR0yiB+6RMzylqAno7gDfdd0TbdHy9
+1fkrVdOKfQmbQP1YH3f4Pfc44PGDulwiz9cgaU7aSCu6yp8hr/+UN7DTfIIPn8QK
+XCF34upbWPzQCh1HEMjlxPy574UGPAGzD3tKEJtTn9bO9xS71FeE3fq+1/1Rhkvg
+AdUpLKdr2Qyqw2o5cnzy721SSpMDLcyYl8AI0zQpESEBt0au0evY2X/qO40tqVbF
+dCcRLfEUZEhRo4jAbAD0w92V58pqL1G2bdSHfymfOBoj0RvqZodypvDrZ99/7TuI
+tZ7oMl/PaZLobiripxU3QVfNjcNDVt1IA7f99EzqKliAsHu8MR4ERpLPGuPaWi2Y
+d/OH9eUBsOTOd9bfvcHTYFmofE4VMiGR8zjB5ZCZtg2UZVAnP785btvgPMDWTGBT
+WW/Z7pvmNilOnWQAIn+lLa3X0hdNw6TCTzut4Ey2MhcYPpQ+kgtrPQ5lhpckazQa
+znjWitx88oVEaP0E9UjSKlbe7tW32SbxCNy+FAEtUICdcyioC3ZSk8XH0bLdfjS+
+hJUeRQFo67EkW1LlYDFH
+=8w3J
+-END PGP SIGNATURE-

Added: 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha256
==
--- 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha256
 (added)
+++ 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha256
 Thu May 23 21:05:17 2019
@@ -0,0 +1 @@
+9572966c4c6460451190ba3d070aa2302186bd540981cd903c54df8cfd5c9d27 
*apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz

Added: 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha512
==
--- 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha512
 (added)
+++ 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc2/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha512
 Thu May 23 21:05:17 2019
@@ -0,0 +1

[phoenix] annotated tag v4.14.2-HBase-1.4-rc2 created (now 473ed83)

2019-05-23 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to annotated tag v4.14.2-HBase-1.4-rc2
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at 473ed83  (tag)
 tagging a86206a006c85afd5884a015d6fe452d823ac626 (commit)
 replaces v4.14.2-HBase-1.4-rc1
  by Thomas D'Silva
  on Thu May 23 14:02:49 2019 -0700

- Log -
v4.14.2-HBase-1.4-rc2
---

No new revisions were added by this update.



[phoenix] annotated tag v4.14.2-HBase-1.3-rc2 created (now f08aa7d)

2019-05-23 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to annotated tag v4.14.2-HBase-1.3-rc2
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at f08aa7d  (tag)
 tagging 13ce47cf4214c9c78e8fec5f96fb861410677817 (commit)
 replaces v4.14.2-HBase-1.3-rc1
  by Thomas D'Silva
  on Thu May 23 13:53:09 2019 -0700

- Log -
v4.14.2-HBase-1.3-rc2
---

No new revisions were added by this update.



[phoenix] 02/02: PHOENIX-5291 Ensure that Phoenix coprocessor close all scanners.

2019-05-23 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit a86206a006c85afd5884a015d6fe452d823ac626
Author: Lars Hofhansl 
AuthorDate: Wed May 22 22:38:20 2019 -0700

PHOENIX-5291 Ensure that Phoenix coprocessor close all scanners.
---
 .../coprocessor/UngroupedAggregateRegionObserver.java   | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index f0ce5b2..72ee4a3 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -1158,7 +1158,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 long rowCount = 0; // in case of async, we report 0 as number of rows 
updated
 StatisticsCollectionRunTracker statsRunTracker =
 StatisticsCollectionRunTracker.getInstance(config);
-boolean runUpdateStats = 
statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo(),scan.getFamilyMap().keySet());
+final boolean runUpdateStats = 
statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo(),scan.getFamilyMap().keySet());
 if (runUpdateStats) {
 if (!async) {
 rowCount = callable.call();
@@ -1187,8 +1187,11 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 
 @Override
 public void close() throws IOException {
-// No-op because we want to manage closing of the inner 
scanner ourselves.
-// This happens inside StatsCollectionCallable.
+// If we ran/scheduled StatsCollectionCallable the delegate
+// scanner is closed there. Otherwise close it here.
+if (!runUpdateStats) {
+super.close();
+}
 }
 
 @Override
@@ -1425,6 +1428,14 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 + fullTableName);
 Scan scan = new Scan();
 scan.setMaxVersions();
+
+// close the passed scanner since we are 
returning a brand-new one
+try {
+if (s != null) {
+s.close();
+}
+} catch (IOException ignore) {}
+
 return new StoreScanner(store, 
store.getScanInfo(), scan, scanners,
 ScanType.COMPACT_RETAIN_DELETES, 
store.getSmallestReadPoint(),
 HConstants.OLDEST_TIMESTAMP);



[phoenix] 01/02: PHOENIX-5269 use AccessChecker to check for user permisssions

2019-05-23 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit da10001e648a0d36a40dda42d441dbc98a131c06
Author: Kiran Kumar Maturi 
AuthorDate: Tue May 14 09:43:29 2019 +0530

PHOENIX-5269 use AccessChecker to check for user permisssions
---
 .../apache/phoenix/end2end/PermissionsCacheIT.java | 99 ++
 .../coprocessor/PhoenixAccessController.java   | 92 ++--
 pom.xml|  2 +-
 3 files changed, 185 insertions(+), 8 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
new file mode 100644
index 000..3605a6e
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertTrue;
+
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.access.TablePermission;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+import com.google.common.collect.ListMultimap;
+
+public class PermissionsCacheIT extends BasePermissionsIT {
+
+public PermissionsCacheIT(boolean isNamespaceMapped) throws Exception {
+super(isNamespaceMapped);
+}
+
+@Test
+public void testPermissionsCachedWithAccessChecker() throws Throwable {
+if (!isNamespaceMapped) {
+return;
+}
+startNewMiniCluster();
+final String schema = generateUniqueName();
+final String tableName = generateUniqueName();
+final String phoenixTableName = SchemaUtil.getTableName(schema, 
tableName);
+try (Connection conn = getConnection()) {
+grantPermissions(regularUser1.getShortName(), 
PHOENIX_NAMESPACE_MAPPED_SYSTEM_TABLES,
+Action.READ, Action.EXEC);
+grantPermissions(regularUser1.getShortName(), 
Collections.singleton("SYSTEM:SEQUENCE"),
+Action.WRITE, Action.READ, Action.EXEC);
+superUser1.runAs(new PrivilegedExceptionAction<Void>() {
+@Override
+public Void run() throws Exception {
+try {
+verifyAllowed(createSchema(schema), superUser1);
+grantPermissions(regularUser1.getShortName(), schema, 
Action.CREATE);
+
grantPermissions(AuthUtil.toGroupEntry(GROUP_SYSTEM_ACCESS), schema,
+Action.CREATE);
+} catch (Throwable e) {
+if (e instanceof Exception) {
+throw (Exception) e;
+} else {
+throw new Exception(e);
+}
+}
+return null;
+}
+});
+verifyAllowed(createTable(phoenixTableName), regularUser1);
+HBaseTestingUtility utility = getUtility();
+Configuration conf = utility.getConfiguration();
+ZooKeeperWatcher zkw = 
HBaseTestingUtility.getZooKeeperWatcher(utility);
+String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", 
"acl");
+String aclZNode = ZKUtil.joinZNode(zkw.baseZNode, aclZnodeParent);
+String tableZNode = ZKUtil.joinZNode(aclZNode, "@" + schema);
+byte[] data = ZKUtil.getData

[phoenix] branch 4.14-HBase-1.4 updated (4b71782 -> a86206a)

2019-05-23 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 4b71782  PHOENIX-4296: reverse scan in ChunkedResultIterator
 new da10001  PHOENIX-5269 use AccessChecker to check for user permisssions
 new a86206a  PHOENIX-5291 Ensure that Phoenix coprocessor close all 
scanners.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../apache/phoenix/end2end/PermissionsCacheIT.java | 99 ++
 .../coprocessor/PhoenixAccessController.java   | 92 ++--
 .../UngroupedAggregateRegionObserver.java  | 17 +++-
 pom.xml|  2 +-
 4 files changed, 199 insertions(+), 11 deletions(-)
 create mode 100644 
phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java



[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-5291 Ensure that Phoenix coprocessor close all scanners.

2019-05-23 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new 13ce47c  PHOENIX-5291 Ensure that Phoenix coprocessor close all 
scanners.
13ce47c is described below

commit 13ce47cf4214c9c78e8fec5f96fb861410677817
Author: Lars Hofhansl 
AuthorDate: Wed May 22 22:40:34 2019 -0700

PHOENIX-5291 Ensure that Phoenix coprocessor close all scanners.
---
 .../coprocessor/UngroupedAggregateRegionObserver.java   | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index f0ce5b2..72ee4a3 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -1158,7 +1158,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 long rowCount = 0; // in case of async, we report 0 as number of rows 
updated
 StatisticsCollectionRunTracker statsRunTracker =
 StatisticsCollectionRunTracker.getInstance(config);
-boolean runUpdateStats = 
statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo(),scan.getFamilyMap().keySet());
+final boolean runUpdateStats = 
statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo(),scan.getFamilyMap().keySet());
 if (runUpdateStats) {
 if (!async) {
 rowCount = callable.call();
@@ -1187,8 +1187,11 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 
 @Override
 public void close() throws IOException {
-// No-op because we want to manage closing of the inner 
scanner ourselves.
-// This happens inside StatsCollectionCallable.
+// If we ran/scheduled StatsCollectionCallable the delegate
+// scanner is closed there. Otherwise close it here.
+if (!runUpdateStats) {
+super.close();
+}
 }
 
 @Override
@@ -1425,6 +1428,14 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 + fullTableName);
 Scan scan = new Scan();
 scan.setMaxVersions();
+
+// close the passed scanner since we are 
returning a brand-new one
+try {
+if (s != null) {
+s.close();
+}
+} catch (IOException ignore) {}
+
 return new StoreScanner(store, 
store.getScanInfo(), scan, scanners,
 ScanType.COMPACT_RETAIN_DELETES, 
store.getSmallestReadPoint(),
 HConstants.OLDEST_TIMESTAMP);



svn commit: r34091 - in /dev/phoenix: apache-phoenix-4.14.2-HBase-1.3-rc1/ apache-phoenix-4.14.2-HBase-1.3-rc1/bin/ apache-phoenix-4.14.2-HBase-1.3-rc1/src/ apache-phoenix-4.14.2-HBase-1.4-rc1/ apache

2019-05-18 Thread tdsilva
Author: tdsilva
Date: Sat May 18 07:11:55 2019
New Revision: 34091

Log:
Phoenix 4.14.2 rc1

Added:
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz
   (with props)

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.asc

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha256

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha512
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/src/

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz
   (with props)

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz.asc

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz.sha256

dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz.sha512
dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/
dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/bin/

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz
   (with props)

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz.asc

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz.sha256

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz.sha512
dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/src/

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz
   (with props)

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz.asc

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz.sha256

dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc1/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz.sha512

Added: 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.asc
==
--- 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.asc
 (added)
+++ 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.asc
 Sat May 18 07:11:55 2019
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1
+
+iQIcBAABAgAGBQJc367pAAoJEGkcZhzf2GwCdZoQAJoQjx/KfDuBsG6y31qXhomm
+7lk4nKFdwv5Cg8dU9Qd/m1J6RtROswwd4u9Uc+V6BKSjBYgMUWVHSyvKAXy7BTD0
+nmZtyGLpTDdOn7jzbaLrIoaCF6d+JcxzomQjbliCJImZ59Pca16cztZUgFUX/5IT
+FGv/vomwfcYpoKFqEgpEVG5tcHsPeBWN9ywZztdE6R5a2V0myYPFwLS5fdkdwci/
+sMr3rCTpyhq5k7wBVFmypOYFDSi3a+xuoX8LEnPFd6po/hag03QvC6Vq4mTYB9RG
+JKr7eNMlMb2Y2Q3C8hf0+3gf+eghPbaua1c35nnwEsTdw0AsL3a1ehkU2lRbzR/j
+S7adaHCGG1RODf6FDRyhRq3NMUZbItCVGLj1vhgiWmHLfCrpRVJZwUoMa/+cGY3n
+1WfDn5LAR8X7iiTssAoELV8cpyIdssl3asL0Xqp0PmX7oVkZZ9jaxgxeltORVUO8
+wMgFQe248pI2YiwJzT/sZK40qRBu0F80d+2w0+p//Kn/ONpO2jnb0e2GgG6CLKbu
+tbQTreCjt9I8AzoINkqAIcNIfKXsEQVjGufRWei3U1KcCU/uv8rW4SxvLA0o0x/9
+Ax0MP8rhZLvjEmwMvyl+rk7RAcSKMkjowEyy1A1h4mA16w7xbPXZziGO1+VhLDu3
++m3a7oJ4jq378tmM/Gzi
+=AAmO
+-END PGP SIGNATURE-

Added: 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha256
==
--- 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha256
 (added)
+++ 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha256
 Sat May 18 07:11:55 2019
@@ -0,0 +1 @@
+4771216d03e493187fba56f3ce9478f926b16f4b517e57edd2c4d69514659fb3 
*apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz

Added: 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha512
==
--- 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha512
 (added)
+++ 
dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc1/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha512
 Sat May 18 07:11:55 2019
@@ -0,0 +1

[phoenix] annotated tag v4.14.2-HBase-1.3-rc1 created (now dab05b2)

2019-05-18 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to annotated tag v4.14.2-HBase-1.3-rc1
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at dab05b2  (tag)
 tagging e3505d91e46de1a1756a145d396f27a3c70e927f (commit)
 replaces 4.14.2-HBase-1.3-rc0
  by Thomas D'Silva
  on Sat May 18 00:07:58 2019 -0700

- Log -
v4.14.2-HBase-1.3-rc1
---

No new revisions were added by this update.



[phoenix] annotated tag v4.14.2-HBase-1.4-rc1 created (now 6cb7844)

2019-05-18 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to annotated tag v4.14.2-HBase-1.4-rc1
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at 6cb7844  (tag)
 tagging 4b717825fb284c1f9fbdeee3d0e5391f5baf13bb (commit)
 replaces 4.14.2-HBase-1.4-rc0
  by Thomas D'Silva
  on Fri May 17 23:56:02 2019 -0700

- Log -
v4.14.2-HBase-1.4-rc1
---

No new revisions were added by this update.



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-4296: reverse scan in ChunkedResultIterator

2019-05-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 59d3c05  PHOENIX-4296: reverse scan in ChunkedResultIterator
59d3c05 is described below

commit 59d3c053e82bffc58259694e6ee33cf8c5bacefe
Author: chfeng 
AuthorDate: Thu May 16 18:41:41 2019 +0800

PHOENIX-4296: reverse scan in ChunkedResultIterator
---
 .../phoenix/iterate/ChunkedResultIterator.java | 13 +++-
 .../phoenix/iterate/ChunkedResultIteratorTest.java | 73 ++
 2 files changed, 83 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index acb6c04..1aab2d5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -58,6 +58,7 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 
 private final ParallelIteratorFactory delegateIteratorFactory;
 private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
+private ImmutableBytesWritable prevLastKey = new ImmutableBytesWritable();
 private final StatementContext context;
 private final TableRef tableRef;
 private final long chunkSize;
@@ -96,8 +97,9 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 }
 }
 
-private ChunkedResultIterator(ParallelIteratorFactory 
delegateIteratorFactory, MutationState mutationState,
-   StatementContext context, TableRef tableRef, Scan scan, long 
chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
+private ChunkedResultIterator(ParallelIteratorFactory 
delegateIteratorFactory,
+MutationState mutationState, StatementContext context, TableRef 
tableRef, Scan scan,
+long chunkSize, ResultIterator scanner, QueryPlan plan) throws 
SQLException {
 this.delegateIteratorFactory = delegateIteratorFactory;
 this.context = context;
 this.tableRef = tableRef;
@@ -138,8 +140,12 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 if (resultIterator.peek() == null && lastKey != null) {
 resultIterator.close();
 scan = ScanUtil.newScan(scan);
-if(ScanUtil.isLocalIndex(scan)) {
+if (ScanUtil.isLocalIndex(scan)) {
 scan.setAttribute(SCAN_START_ROW_SUFFIX, 
ByteUtil.copyKeyBytesIfNecessary(lastKey));
+} else if (ScanUtil.isReversed(scan)) {
+// lastKey is the last row the previous iterator meet but not 
returned.
+// for reverse scan, use prevLastKey as the new stopRow.
+scan.setStopRow(ByteUtil.copyKeyBytesIfNecessary(prevLastKey));
 } else {
 scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
 }
@@ -212,6 +218,7 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 byte[] currentKey = lastKey.get();
 int offset = lastKey.getOffset();
 int length = lastKey.getLength();
+prevLastKey.set(lastKey.copyBytes());
 newTuple.getKey(lastKey);
 
 return Bytes.compareTo(currentKey, offset, length, lastKey.get(), 
lastKey.getOffset(), lastKey.getLength()) != 0;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
new file mode 100644
index 000..18402f0
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.iterate;
+
+import static org.apache.phoenix.util.TestUtil.PHOENIX_JDBC_URL;
+import static org.junit.Assert.assertEquals;
+import static
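
The resume logic the patch adds comes down to which scan boundary moves once a chunk is exhausted: a forward scan advances its start row to lastKey, while a reversed scan instead sets its stop row to prevLastKey. A rough sketch against the plain HBase client Scan API, with made-up row keys; the real code goes through ScanUtil.newScan and Phoenix's own scan conventions, so treat this only as an illustration of the boundary bookkeeping:

```
import java.io.IOException;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ChunkResumeSketch {
    // Builds the scan for the next chunk; lastKey/prevLastKey mirror the fields in the diff.
    static Scan nextChunk(Scan previous, byte[] lastKey, byte[] prevLastKey) throws IOException {
        Scan next = new Scan(previous); // copy ranges, reversal flag, attributes, etc.
        if (previous.isReversed()) {
            // Mirrors the patch: a reversed chunk resumes by replacing the stop row
            // with the key that preceded the exhausted chunk's last key.
            next.setStopRow(prevLastKey);
        } else {
            // A forward chunk resumes from the last key the previous chunk stopped at.
            next.setStartRow(lastKey);
        }
        return next;
    }

    public static void main(String[] args) throws IOException {
        Scan reversed = new Scan();
        reversed.setReversed(true);
        Scan resumed = nextChunk(reversed, Bytes.toBytes("row050"), Bytes.toBytes("row051"));
        System.out.println(Bytes.toString(resumed.getStopRow())); // prints row051
    }
}
```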

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-4296: reverse scan in ChunkedResultIterator

2019-05-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 1d99ff0  PHOENIX-4296: reverse scan in ChunkedResultIterator
1d99ff0 is described below

commit 1d99ff049807ce98bc4a831eeb219e793f705793
Author: chfeng 
AuthorDate: Thu May 16 18:41:41 2019 +0800

PHOENIX-4296: reverse scan in ChunkedResultIterator
---
 .../phoenix/iterate/ChunkedResultIterator.java | 13 +++-
 .../phoenix/iterate/ChunkedResultIteratorTest.java | 73 ++
 2 files changed, 83 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index acb6c04..1aab2d5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -58,6 +58,7 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 
 private final ParallelIteratorFactory delegateIteratorFactory;
 private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
+private ImmutableBytesWritable prevLastKey = new ImmutableBytesWritable();
 private final StatementContext context;
 private final TableRef tableRef;
 private final long chunkSize;
@@ -96,8 +97,9 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 }
 }
 
-private ChunkedResultIterator(ParallelIteratorFactory 
delegateIteratorFactory, MutationState mutationState,
-   StatementContext context, TableRef tableRef, Scan scan, long 
chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
+private ChunkedResultIterator(ParallelIteratorFactory 
delegateIteratorFactory,
+MutationState mutationState, StatementContext context, TableRef 
tableRef, Scan scan,
+long chunkSize, ResultIterator scanner, QueryPlan plan) throws 
SQLException {
 this.delegateIteratorFactory = delegateIteratorFactory;
 this.context = context;
 this.tableRef = tableRef;
@@ -138,8 +140,12 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 if (resultIterator.peek() == null && lastKey != null) {
 resultIterator.close();
 scan = ScanUtil.newScan(scan);
-if(ScanUtil.isLocalIndex(scan)) {
+if (ScanUtil.isLocalIndex(scan)) {
 scan.setAttribute(SCAN_START_ROW_SUFFIX, 
ByteUtil.copyKeyBytesIfNecessary(lastKey));
+} else if (ScanUtil.isReversed(scan)) {
+// lastKey is the last row the previous iterator meet but not 
returned.
+// for reverse scan, use prevLastKey as the new stopRow.
+scan.setStopRow(ByteUtil.copyKeyBytesIfNecessary(prevLastKey));
 } else {
 scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
 }
@@ -212,6 +218,7 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 byte[] currentKey = lastKey.get();
 int offset = lastKey.getOffset();
 int length = lastKey.getLength();
+prevLastKey.set(lastKey.copyBytes());
 newTuple.getKey(lastKey);
 
 return Bytes.compareTo(currentKey, offset, length, lastKey.get(), 
lastKey.getOffset(), lastKey.getLength()) != 0;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
new file mode 100644
index 000..18402f0
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.iterate;
+
+import static org.apache.phoenix.util.TestUtil.PHOENIX_JDBC_URL;
+import static org.junit.Assert.assertEquals;
+import static

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-4296: reverse scan in ChunkedResultIterator

2019-05-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new f93a74a  PHOENIX-4296: reverse scan in ChunkedResultIterator
f93a74a is described below

commit f93a74ae656a1ca92111477d2611b80b00db59e7
Author: chfeng 
AuthorDate: Thu May 16 18:41:41 2019 +0800

PHOENIX-4296: reverse scan in ChunkedResultIterator
---
 .../phoenix/iterate/ChunkedResultIterator.java | 13 +++-
 .../phoenix/iterate/ChunkedResultIteratorTest.java | 73 ++
 2 files changed, 83 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index acb6c04..1aab2d5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -58,6 +58,7 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 
 private final ParallelIteratorFactory delegateIteratorFactory;
 private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
+private ImmutableBytesWritable prevLastKey = new ImmutableBytesWritable();
 private final StatementContext context;
 private final TableRef tableRef;
 private final long chunkSize;
@@ -96,8 +97,9 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 }
 }
 
-private ChunkedResultIterator(ParallelIteratorFactory 
delegateIteratorFactory, MutationState mutationState,
-   StatementContext context, TableRef tableRef, Scan scan, long 
chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
+private ChunkedResultIterator(ParallelIteratorFactory 
delegateIteratorFactory,
+MutationState mutationState, StatementContext context, TableRef 
tableRef, Scan scan,
+long chunkSize, ResultIterator scanner, QueryPlan plan) throws 
SQLException {
 this.delegateIteratorFactory = delegateIteratorFactory;
 this.context = context;
 this.tableRef = tableRef;
@@ -138,8 +140,12 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 if (resultIterator.peek() == null && lastKey != null) {
 resultIterator.close();
 scan = ScanUtil.newScan(scan);
-if(ScanUtil.isLocalIndex(scan)) {
+if (ScanUtil.isLocalIndex(scan)) {
 scan.setAttribute(SCAN_START_ROW_SUFFIX, 
ByteUtil.copyKeyBytesIfNecessary(lastKey));
+} else if (ScanUtil.isReversed(scan)) {
+// lastKey is the last row the previous iterator meet but not 
returned.
+// for reverse scan, use prevLastKey as the new stopRow.
+scan.setStopRow(ByteUtil.copyKeyBytesIfNecessary(prevLastKey));
 } else {
 scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
 }
@@ -212,6 +218,7 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 byte[] currentKey = lastKey.get();
 int offset = lastKey.getOffset();
 int length = lastKey.getLength();
+prevLastKey.set(lastKey.copyBytes());
 newTuple.getKey(lastKey);
 
 return Bytes.compareTo(currentKey, offset, length, lastKey.get(), 
lastKey.getOffset(), lastKey.getLength()) != 0;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
new file mode 100644
index 000..18402f0
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.iterate;
+
+import static org.apache.phoenix.util.TestUtil.PHOENIX_JDBC_URL;
+import static org.junit.Assert.assertEquals;
+import static

[phoenix] branch master updated: PHOENIX-4296: reverse scan in ChunkedResultIterator

2019-05-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new accd0af  PHOENIX-4296: reverse scan in ChunkedResultIterator
accd0af is described below

commit accd0af195b521cd572219d93033f94ad3b3401e
Author: chfeng 
AuthorDate: Thu May 16 18:41:41 2019 +0800

PHOENIX-4296: reverse scan in ChunkedResultIterator
---
 .../phoenix/iterate/ChunkedResultIterator.java | 13 +++-
 .../phoenix/iterate/ChunkedResultIteratorTest.java | 73 ++
 2 files changed, 83 insertions(+), 3 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index acb6c04..1aab2d5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -58,6 +58,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 
 private final ParallelIteratorFactory delegateIteratorFactory;
 private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
+private ImmutableBytesWritable prevLastKey = new ImmutableBytesWritable();
 private final StatementContext context;
 private final TableRef tableRef;
 private final long chunkSize;
@@ -96,8 +97,9 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 }
 }
 
-private ChunkedResultIterator(ParallelIteratorFactory delegateIteratorFactory, MutationState mutationState,
-   StatementContext context, TableRef tableRef, Scan scan, long chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
+private ChunkedResultIterator(ParallelIteratorFactory delegateIteratorFactory,
+MutationState mutationState, StatementContext context, TableRef tableRef, Scan scan,
+long chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
 this.delegateIteratorFactory = delegateIteratorFactory;
 this.context = context;
 this.tableRef = tableRef;
@@ -138,8 +140,12 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 if (resultIterator.peek() == null && lastKey != null) {
 resultIterator.close();
 scan = ScanUtil.newScan(scan);
-if(ScanUtil.isLocalIndex(scan)) {
+if (ScanUtil.isLocalIndex(scan)) {
 scan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.copyKeyBytesIfNecessary(lastKey));
+} else if (ScanUtil.isReversed(scan)) {
+// lastKey is the last row the previous iterator met but not returned.
+// for reverse scan, use prevLastKey as the new stopRow.
+scan.setStopRow(ByteUtil.copyKeyBytesIfNecessary(prevLastKey));
 } else {
 scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
 }
@@ -212,6 +218,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 byte[] currentKey = lastKey.get();
 int offset = lastKey.getOffset();
 int length = lastKey.getLength();
+prevLastKey.set(lastKey.copyBytes());
 newTuple.getKey(lastKey);
 
 return Bytes.compareTo(currentKey, offset, length, lastKey.get(), lastKey.getOffset(), lastKey.getLength()) != 0;
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
new file mode 100644
index 000..18402f0
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.iterate;
+
+import static org.apache.phoenix.util.TestUtil.PHOENIX_JDBC_URL;
+import static org.junit.Assert.assertEquals;
+import static org.junit.

[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-4296: reverse scan in ChunkedResultIterator

2019-05-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new e3505d9  PHOENIX-4296: reverse scan in ChunkedResultIterator
e3505d9 is described below

commit e3505d91e46de1a1756a145d396f27a3c70e927f
Author: chfeng 
AuthorDate: Thu May 16 18:41:41 2019 +0800

PHOENIX-4296: reverse scan in ChunkedResultIterator
---
 .../phoenix/iterate/ChunkedResultIterator.java | 13 +++-
 .../phoenix/iterate/ChunkedResultIteratorTest.java | 73 ++
 2 files changed, 83 insertions(+), 3 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index acb6c04..1aab2d5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -58,6 +58,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 
 private final ParallelIteratorFactory delegateIteratorFactory;
 private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
+private ImmutableBytesWritable prevLastKey = new ImmutableBytesWritable();
 private final StatementContext context;
 private final TableRef tableRef;
 private final long chunkSize;
@@ -96,8 +97,9 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 }
 }
 
-private ChunkedResultIterator(ParallelIteratorFactory delegateIteratorFactory, MutationState mutationState,
-   StatementContext context, TableRef tableRef, Scan scan, long chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
+private ChunkedResultIterator(ParallelIteratorFactory delegateIteratorFactory,
+MutationState mutationState, StatementContext context, TableRef tableRef, Scan scan,
+long chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
 this.delegateIteratorFactory = delegateIteratorFactory;
 this.context = context;
 this.tableRef = tableRef;
@@ -138,8 +140,12 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 if (resultIterator.peek() == null && lastKey != null) {
 resultIterator.close();
 scan = ScanUtil.newScan(scan);
-if(ScanUtil.isLocalIndex(scan)) {
+if (ScanUtil.isLocalIndex(scan)) {
 scan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.copyKeyBytesIfNecessary(lastKey));
+} else if (ScanUtil.isReversed(scan)) {
+// lastKey is the last row the previous iterator met but not returned.
+// for reverse scan, use prevLastKey as the new stopRow.
+scan.setStopRow(ByteUtil.copyKeyBytesIfNecessary(prevLastKey));
 } else {
 scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
 }
@@ -212,6 +218,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 byte[] currentKey = lastKey.get();
 int offset = lastKey.getOffset();
 int length = lastKey.getLength();
+prevLastKey.set(lastKey.copyBytes());
 newTuple.getKey(lastKey);
 
 return Bytes.compareTo(currentKey, offset, length, lastKey.get(), lastKey.getOffset(), lastKey.getLength()) != 0;
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
new file mode 100644
index 000..18402f0
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.iterate;
+
+import static org.apache.phoenix.util.TestUtil.PHOENIX_JDBC_URL;
+import static org.junit.Assert.assertEquals;
+import static

[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-4296: reverse scan in ChunkedResultIterator

2019-05-16 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 4b71782  PHOENIX-4296: reverse scan in ChunkedResultIterator
4b71782 is described below

commit 4b717825fb284c1f9fbdeee3d0e5391f5baf13bb
Author: chfeng 
AuthorDate: Thu May 16 18:41:41 2019 +0800

PHOENIX-4296: reverse scan in ChunkedResultIterator
---
 .../phoenix/iterate/ChunkedResultIterator.java | 13 +++-
 .../phoenix/iterate/ChunkedResultIteratorTest.java | 73 ++
 2 files changed, 83 insertions(+), 3 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index acb6c04..1aab2d5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -58,6 +58,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 
 private final ParallelIteratorFactory delegateIteratorFactory;
 private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
+private ImmutableBytesWritable prevLastKey = new ImmutableBytesWritable();
 private final StatementContext context;
 private final TableRef tableRef;
 private final long chunkSize;
@@ -96,8 +97,9 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 }
 }
 
-private ChunkedResultIterator(ParallelIteratorFactory delegateIteratorFactory, MutationState mutationState,
-   StatementContext context, TableRef tableRef, Scan scan, long chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
+private ChunkedResultIterator(ParallelIteratorFactory delegateIteratorFactory,
+MutationState mutationState, StatementContext context, TableRef tableRef, Scan scan,
+long chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
 this.delegateIteratorFactory = delegateIteratorFactory;
 this.context = context;
 this.tableRef = tableRef;
@@ -138,8 +140,12 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 if (resultIterator.peek() == null && lastKey != null) {
 resultIterator.close();
 scan = ScanUtil.newScan(scan);
-if(ScanUtil.isLocalIndex(scan)) {
+if (ScanUtil.isLocalIndex(scan)) {
 scan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.copyKeyBytesIfNecessary(lastKey));
+} else if (ScanUtil.isReversed(scan)) {
+// lastKey is the last row the previous iterator met but not returned.
+// for reverse scan, use prevLastKey as the new stopRow.
+scan.setStopRow(ByteUtil.copyKeyBytesIfNecessary(prevLastKey));
 } else {
 scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
 }
@@ -212,6 +218,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
 byte[] currentKey = lastKey.get();
 int offset = lastKey.getOffset();
 int length = lastKey.getLength();
+prevLastKey.set(lastKey.copyBytes());
 newTuple.getKey(lastKey);
 
 return Bytes.compareTo(currentKey, offset, length, lastKey.get(), lastKey.getOffset(), lastKey.getLength()) != 0;
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
new file mode 100644
index 000..18402f0
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.iterate;
+
+import static org.apache.phoenix.util.TestUtil.PHOENIX_JDBC_URL;
+import static org.junit.Assert.assertEquals;
+import static

[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT

2019-05-08 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new ca81027  PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT
ca81027 is described below

commit ca810278b3405ef2a194de079a3ecd2849da06da
Author: Thomas D'Silva 
AuthorDate: Tue May 7 14:29:56 2019 -0700

PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT
---
 .../UpdateCacheAcrossDifferentClientsIT.java   | 91 +-
 1 file changed, 53 insertions(+), 38 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
index 25e2367..8be6f5b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
@@ -10,18 +10,12 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.Map;
-import java.util.Properties;
-
+import com.google.common.base.Throwables;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.phoenix.exception.PhoenixIOException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixDriver;
@@ -34,26 +28,35 @@ import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import com.google.common.collect.Maps;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class UpdateCacheAcrossDifferentClientsIT extends BaseUniqueNamesOwnClusterIT {
 
 @BeforeClass
 public static void doSetup() throws Exception {
-Map props = Maps.newConcurrentMap();
-props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
-props.put(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3000));
-//When we run all tests together we are using global cluster(driver)
-//so to make drop work we need to re register driver with DROP_METADATA_ATTRIB property
-destroyDriver();
-setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-//Registering real Phoenix driver to have multiple ConnectionQueryServices created across connections
-//so that metadata changes doesn't get propagated across connections
+Configuration conf = HBaseConfiguration.create();
+HBaseTestingUtility hbaseTestUtil = new HBaseTestingUtility(conf);
+setUpConfigForMiniCluster(conf);
+conf.set(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
+conf.set(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
+conf.set(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3000));
+hbaseTestUtil.startMiniCluster();
+// establish url and quorum. Need to use PhoenixDriver and not PhoenixTestDriver
+String zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
+url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
 DriverManager.registerDriver(PhoenixDriver.INSTANCE);
 }
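
The rewritten doSetup() above boots a private mini cluster and registers the real PhoenixDriver, and the removed comment spells out why: the test needs connections that behave like independent clients rather than sharing one metadata cache. Roughly, the test then builds its two JDBC URLs as in the sketch below; the literal quorum value and the main() wrapper are illustrative only, not part of the test.

    // Sketch only: two Phoenix JDBC URLs built from the mini cluster's quorum.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class TwoClientUrlsSketch {
        public static void main(String[] args) throws Exception {
            String zkQuorum = "localhost:2181";            // in the test this comes from hbaseTestUtil
            String url = "jdbc:phoenix:" + zkQuorum;       // JDBC_PROTOCOL + separator + quorum
            String url2 = url + ":LongRunningQueries";     // extra principal, so it acts as a second client
            Properties props = new Properties();
            try (Connection conn1 = DriverManager.getConnection(url, props);
                 Connection conn2 = DriverManager.getConnection(url2, props)) {
                // conn1 and conn2 behave like independent clients, which is what lets
                // the test observe a stale metadata cache on one while the other mutates.
            }
        }
    }
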
 
@@ -64,8 +67,8 @@ public class UpdateCacheAcrossDifferentClientsIT extends BaseUniqueNamesOwnClusterIT {
 longRunningProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
 QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
 longRunningProps.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
-Connection conn1 = DriverManager.getConnection(getUrl(), longRunningProps);
-String url2 = getUrl() + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + "LongRunningQueries";
+Connection conn1 = DriverM

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT

2019-05-08 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new a73efce  PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT
a73efce is described below

commit a73efcea80a9d20334d60404df9b8ed7b4f9932a
Author: Thomas D'Silva 
AuthorDate: Tue May 7 14:29:56 2019 -0700

PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT
---
 .../UpdateCacheAcrossDifferentClientsIT.java   | 91 +-
 1 file changed, 53 insertions(+), 38 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
index 25e2367..8be6f5b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
@@ -10,18 +10,12 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.Map;
-import java.util.Properties;
-
+import com.google.common.base.Throwables;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.phoenix.exception.PhoenixIOException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixDriver;
@@ -34,26 +28,35 @@ import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import com.google.common.collect.Maps;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class UpdateCacheAcrossDifferentClientsIT extends BaseUniqueNamesOwnClusterIT {
 
 @BeforeClass
 public static void doSetup() throws Exception {
-Map props = Maps.newConcurrentMap();
-props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
-props.put(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3000));
-//When we run all tests together we are using global cluster(driver)
-//so to make drop work we need to re register driver with DROP_METADATA_ATTRIB property
-destroyDriver();
-setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-//Registering real Phoenix driver to have multiple ConnectionQueryServices created across connections
-//so that metadata changes doesn't get propagated across connections
+Configuration conf = HBaseConfiguration.create();
+HBaseTestingUtility hbaseTestUtil = new HBaseTestingUtility(conf);
+setUpConfigForMiniCluster(conf);
+conf.set(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
+conf.set(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
+conf.set(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3000));
+hbaseTestUtil.startMiniCluster();
+// establish url and quorum. Need to use PhoenixDriver and not PhoenixTestDriver
+String zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
+url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
 DriverManager.registerDriver(PhoenixDriver.INSTANCE);
 }
 
@@ -64,8 +67,8 @@ public class UpdateCacheAcrossDifferentClientsIT extends BaseUniqueNamesOwnClusterIT {
 longRunningProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
 QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
 longRunningProps.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
-Connection conn1 = DriverManager.getConnection(getUrl(), longRunningProps);
-String url2 = getUrl() + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + "LongRunningQueries";
+Connection conn1 = DriverM

[phoenix] branch master updated: PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT

2019-05-08 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 700c643  PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT
700c643 is described below

commit 700c6436984f23c0a9783e3ea37dd1251b824528
Author: Thomas D'Silva 
AuthorDate: Tue May 7 14:29:56 2019 -0700

PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT
---
 .../UpdateCacheAcrossDifferentClientsIT.java   | 91 +-
 1 file changed, 53 insertions(+), 38 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
index 4c85a0c..1d48858 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
@@ -10,18 +10,12 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.Map;
-import java.util.Properties;
-
+import com.google.common.base.Throwables;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.phoenix.exception.PhoenixIOException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixDriver;
@@ -34,26 +28,35 @@ import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import com.google.common.collect.Maps;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class UpdateCacheAcrossDifferentClientsIT extends BaseUniqueNamesOwnClusterIT {
 
 @BeforeClass
 public static void doSetup() throws Exception {
-Map props = Maps.newConcurrentMap();
-props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
-props.put(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3000));
-//When we run all tests together we are using global cluster(driver)
-//so to make drop work we need to re register driver with DROP_METADATA_ATTRIB property
-destroyDriver();
-setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-//Registering real Phoenix driver to have multiple ConnectionQueryServices created across connections
-//so that metadata changes doesn't get propagated across connections
+Configuration conf = HBaseConfiguration.create();
+HBaseTestingUtility hbaseTestUtil = new HBaseTestingUtility(conf);
+setUpConfigForMiniCluster(conf);
+conf.set(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
+conf.set(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
+conf.set(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3000));
+hbaseTestUtil.startMiniCluster();
+// establish url and quorum. Need to use PhoenixDriver and not PhoenixTestDriver
+String zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
+url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
 DriverManager.registerDriver(PhoenixDriver.INSTANCE);
 }
 
@@ -64,8 +67,8 @@ public class UpdateCacheAcrossDifferentClientsIT extends BaseUniqueNamesOwnClusterIT {
 longRunningProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
 QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
 longRunningProps.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
-Connection conn1 = DriverManager.getConnection(getUrl(), longRunningProps);
-String url2 = getUrl() + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + "LongRunningQueries";
+Connection conn1 = DriverManager.getConnection(u

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT

2019-05-08 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 788bb66  PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT
788bb66 is described below

commit 788bb66c3d744b2260faa6008e647a2829299832
Author: Thomas D'Silva 
AuthorDate: Tue May 7 14:29:56 2019 -0700

PHOENIX-5273 Singleton ConnectionQueryServices for UpdateCacheAcrossDifferentClientsIT
---
 .../UpdateCacheAcrossDifferentClientsIT.java   | 91 +-
 1 file changed, 53 insertions(+), 38 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
index 25e2367..8be6f5b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpdateCacheAcrossDifferentClientsIT.java
@@ -10,18 +10,12 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.Map;
-import java.util.Properties;
-
+import com.google.common.base.Throwables;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.phoenix.exception.PhoenixIOException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixDriver;
@@ -34,26 +28,35 @@ import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import com.google.common.collect.Maps;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class UpdateCacheAcrossDifferentClientsIT extends BaseUniqueNamesOwnClusterIT {
 
 @BeforeClass
 public static void doSetup() throws Exception {
-Map props = Maps.newConcurrentMap();
-props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
-props.put(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3000));
-//When we run all tests together we are using global cluster(driver)
-//so to make drop work we need to re register driver with DROP_METADATA_ATTRIB property
-destroyDriver();
-setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-//Registering real Phoenix driver to have multiple ConnectionQueryServices created across connections
-//so that metadata changes doesn't get propagated across connections
+Configuration conf = HBaseConfiguration.create();
+HBaseTestingUtility hbaseTestUtil = new HBaseTestingUtility(conf);
+setUpConfigForMiniCluster(conf);
+conf.set(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
+conf.set(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
+conf.set(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3000));
+hbaseTestUtil.startMiniCluster();
+// establish url and quorum. Need to use PhoenixDriver and not PhoenixTestDriver
+String zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
+url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
 DriverManager.registerDriver(PhoenixDriver.INSTANCE);
 }
 
@@ -64,8 +67,8 @@ public class UpdateCacheAcrossDifferentClientsIT extends BaseUniqueNamesOwnClusterIT {
 longRunningProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
 QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
 longRunningProps.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.TRUE.toString());
-Connection conn1 = DriverManager.getConnection(getUrl(), longRunningProps);
-String url2 = getUrl() + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + "LongRunningQueries";
+Connection conn1 = DriverM

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()

2019-05-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 512cbb3  PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
512cbb3 is described below

commit 512cbb3fb5c405f6958c0c932d70a28b98729a60
Author: kliewkliew 
AuthorDate: Mon Feb 6 19:53:58 2017 -0800

PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
---
 .../src/main/java/org/apache/phoenix/expression/LiteralExpression.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
index f20d7e2..110177a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
@@ -184,6 +184,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getBooleanLiteralExpression((Boolean)value, determinism);
 }
 PDataType actualType = PDataType.fromLiteral(value);
+type = type == null ? actualType : type;
 try {
 value = type.toObject(value, actualType);
 } catch (IllegalDataException e) {
@@ -208,7 +209,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getTypedNullLiteralExpression(type, determinism);
 }
 if (maxLength == null) {
-maxLength = type == null || !type.isFixedWidth() ? null : type.getMaxLength(value);
+maxLength = type.isFixedWidth() ? type.getMaxLength(value) : null;
 }
 return new LiteralExpression(value, type, b, maxLength, scale, sortOrder, determinism);
 }
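
The point of the fix is visible in the two hunks: newConstant() dereferenced type (via type.toObject(...)) before the old "type == null" guard could ever run, so the guard was dead; defaulting type to actualType up front is what makes the later type.isFixedWidth() call safe. A compact before/after illustration, using a stand-in interface instead of the real PDataType:

    // Sketch only: why the original null check was ineffective.
    public class NullCheckSketch {
        interface DataTypeLike {               // minimal stand-in for PDataType
            Object toObject(Object value);
            boolean isFixedWidth();
            Integer getMaxLength(Object value);
        }

        static Integer maxLengthBefore(DataTypeLike type, Object value) {
            Object coerced = type.toObject(value);              // NPE here when type == null ...
            return type == null || !type.isFixedWidth() ? null  // ... so this guard never fires
                    : type.getMaxLength(value);
        }

        static Integer maxLengthAfter(DataTypeLike type, DataTypeLike actualType, Object value) {
            type = (type == null) ? actualType : type;          // default first, as in the patch
            Object coerced = type.toObject(value);
            return type.isFixedWidth() ? type.getMaxLength(value) : null;
        }
    }
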



[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()

2019-05-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 613f32e  PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
613f32e is described below

commit 613f32e1991b6456451f6cfd3f618ec5f1b0bbb9
Author: kliewkliew 
AuthorDate: Mon Feb 6 19:53:58 2017 -0800

PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
---
 .../src/main/java/org/apache/phoenix/expression/LiteralExpression.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
index f20d7e2..110177a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
@@ -184,6 +184,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getBooleanLiteralExpression((Boolean)value, determinism);
 }
 PDataType actualType = PDataType.fromLiteral(value);
+type = type == null ? actualType : type;
 try {
 value = type.toObject(value, actualType);
 } catch (IllegalDataException e) {
@@ -208,7 +209,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getTypedNullLiteralExpression(type, determinism);
 }
 if (maxLength == null) {
-maxLength = type == null || !type.isFixedWidth() ? null : type.getMaxLength(value);
+maxLength = type.isFixedWidth() ? type.getMaxLength(value) : null;
 }
 return new LiteralExpression(value, type, b, maxLength, scale, sortOrder, determinism);
 }



[phoenix] branch master updated: PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()

2019-05-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new d9057de  PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
d9057de is described below

commit d9057de0e5ec7f1b89b6586f6c03f73163fd01ee
Author: kliewkliew 
AuthorDate: Mon Feb 6 19:53:58 2017 -0800

PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
---
 .../src/main/java/org/apache/phoenix/expression/LiteralExpression.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
index f20d7e2..110177a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
@@ -184,6 +184,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getBooleanLiteralExpression((Boolean)value, determinism);
 }
 PDataType actualType = PDataType.fromLiteral(value);
+type = type == null ? actualType : type;
 try {
 value = type.toObject(value, actualType);
 } catch (IllegalDataException e) {
@@ -208,7 +209,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getTypedNullLiteralExpression(type, determinism);
 }
 if (maxLength == null) {
-maxLength = type == null || !type.isFixedWidth() ? null : type.getMaxLength(value);
+maxLength = type.isFixedWidth() ? type.getMaxLength(value) : null;
 }
 return new LiteralExpression(value, type, b, maxLength, scale, sortOrder, determinism);
 }



[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()

2019-05-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 4abc497  PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
4abc497 is described below

commit 4abc497dddbb0cc6a50483136885247b47658348
Author: kliewkliew 
AuthorDate: Mon Feb 6 19:53:58 2017 -0800

PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
---
 .../src/main/java/org/apache/phoenix/expression/LiteralExpression.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
index f20d7e2..110177a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
@@ -184,6 +184,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getBooleanLiteralExpression((Boolean)value, determinism);
 }
 PDataType actualType = PDataType.fromLiteral(value);
+type = type == null ? actualType : type;
 try {
 value = type.toObject(value, actualType);
 } catch (IllegalDataException e) {
@@ -208,7 +209,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getTypedNullLiteralExpression(type, determinism);
 }
 if (maxLength == null) {
-maxLength = type == null || !type.isFixedWidth() ? null : type.getMaxLength(value);
+maxLength = type.isFixedWidth() ? type.getMaxLength(value) : null;
 }
 return new LiteralExpression(value, type, b, maxLength, scale, sortOrder, determinism);
 }



[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()

2019-05-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new 08df693  PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
08df693 is described below

commit 08df69389b4e3732f8cd5fc5007e04ca22ada229
Author: kliewkliew 
AuthorDate: Mon Feb 6 19:53:58 2017 -0800

PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
---
 .../src/main/java/org/apache/phoenix/expression/LiteralExpression.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
index f20d7e2..110177a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
@@ -184,6 +184,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getBooleanLiteralExpression((Boolean)value, determinism);
 }
 PDataType actualType = PDataType.fromLiteral(value);
+type = type == null ? actualType : type;
 try {
 value = type.toObject(value, actualType);
 } catch (IllegalDataException e) {
@@ -208,7 +209,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getTypedNullLiteralExpression(type, determinism);
 }
 if (maxLength == null) {
-maxLength = type == null || !type.isFixedWidth() ? null : type.getMaxLength(value);
+maxLength = type.isFixedWidth() ? type.getMaxLength(value) : null;
 }
 return new LiteralExpression(value, type, b, maxLength, scale, sortOrder, determinism);
 }



[phoenix] branch 4.14-HBase-1.2 updated: PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()

2019-05-06 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.2 by this push:
 new 3ee0727  PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
3ee0727 is described below

commit 3ee07276333842cfef94f7f79d741d578b8b9062
Author: kliewkliew 
AuthorDate: Mon Feb 6 19:53:58 2017 -0800

PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
---
 .../src/main/java/org/apache/phoenix/expression/LiteralExpression.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
index f20d7e2..110177a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
@@ -184,6 +184,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getBooleanLiteralExpression((Boolean)value, determinism);
 }
 PDataType actualType = PDataType.fromLiteral(value);
+type = type == null ? actualType : type;
 try {
 value = type.toObject(value, actualType);
 } catch (IllegalDataException e) {
@@ -208,7 +209,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getTypedNullLiteralExpression(type, determinism);
 }
 if (maxLength == null) {
-maxLength = type == null || !type.isFixedWidth() ? null : type.getMaxLength(value);
+maxLength = type.isFixedWidth() ? type.getMaxLength(value) : null;
 }
 return new LiteralExpression(value, type, b, maxLength, scale, sortOrder, determinism);
 }



[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()

2019-05-03 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 494f269  PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
494f269 is described below

commit 494f2698e5a1fecd7528d56e84725300a1f6229e
Author: kliewkliew 
AuthorDate: Mon Feb 6 19:53:58 2017 -0800

PHOENIX-3413 Ineffective null check in LiteralExpression#newConstant()
---
 .../src/main/java/org/apache/phoenix/expression/LiteralExpression.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
index f20d7e2..110177a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LiteralExpression.java
@@ -184,6 +184,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getBooleanLiteralExpression((Boolean)value, determinism);
 }
 PDataType actualType = PDataType.fromLiteral(value);
+type = type == null ? actualType : type;
 try {
 value = type.toObject(value, actualType);
 } catch (IllegalDataException e) {
@@ -208,7 +209,7 @@ public class LiteralExpression extends BaseTerminalExpression {
 return getTypedNullLiteralExpression(type, determinism);
 }
 if (maxLength == null) {
-maxLength = type == null || !type.isFixedWidth() ? null : type.getMaxLength(value);
+maxLength = type.isFixedWidth() ? type.getMaxLength(value) : null;
 }
 return new LiteralExpression(value, type, b, maxLength, scale, sortOrder, determinism);
 }



[phoenix] branch phoenix-stats updated: Fix slotSpan appending to schema issue in scanRanges.getRowKeyRanges()

2019-05-03 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch phoenix-stats
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/phoenix-stats by this push:
 new 2487d9d  Fix slotSpan appending to schema issue in scanRanges.getRowKeyRanges()
 new 4d4e9c8  Merge pull request #496 from dbwong/phoenix-stats
2487d9d is described below

commit 2487d9d729b12f808266bf343b1e36d2cfcd8089
Author: Daniel Wong 
AuthorDate: Mon Apr 29 18:58:23 2019 -0700

Fix slotSpan appending to schema issue in scanRanges.getRowKeyRanges()
---
 .../org/apache/phoenix/compile/ScanRanges.java |  18 ++-
 .../phoenix/compile/ScanRangesIntersectTest.java   | 123 +
 2 files changed, 137 insertions(+), 4 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
index c802678..1b695eb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
@@ -719,13 +719,23 @@ public class ScanRanges {
 
 int[] slotSpans = this.getSlotSpans();
 
-// If the ranges here do not qualify all the keys then those keys are unbound
-if (newRanges.size() < schema.getMaxFields()) {
+int fieldSpan = 0;
+for(int i = 0; i < slotSpans.length; i++){
+//Account for a slot covering multiple fields
+fieldSpan = fieldSpan + slotSpans[i] + 1;
+}
+
+// If the spans here do not qualify all the keys then those keys are unbound
+int maxFields = schema.getMaxFields();
+if (fieldSpan < maxFields) {
 int originalSize = newRanges.size();
-for (int i = 0; i < schema.getMaxFields() - originalSize; i++) {
+for (int i = 0; i < maxFields - originalSize; i++) {
 
newRanges.add(Lists.newArrayList(KeyRange.EVERYTHING_RANGE));
 }
-slotSpans = new int[schema.getMaxFields()];
+//extend slots
+int slotsToAdd = (maxFields - fieldSpan);
+int newSlotLength = slotSpans.length + slotsToAdd;
+slotSpans = new int[newSlotLength];
 System.arraycopy(this.getSlotSpans(), 0, slotSpans, 0, 
this.getSlotSpans().length);
 }
 
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesIntersectTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesIntersectTest.java
index accc28e..df1bade 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesIntersectTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/ScanRangesIntersectTest.java
@@ -36,6 +36,9 @@ import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PDouble;
+import org.apache.phoenix.schema.types.PSmallint;
+import org.apache.phoenix.schema.types.PUnsignedTinyint;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.junit.Test;
 
@@ -112,6 +115,33 @@ public class ScanRangesIntersectTest {
 }
 };
 
+private static PDatum SIMPLE_TINYINT = new PDatum() {
+@Override
+public boolean isNullable() {
+return false;
+}
+
+@Override
+public PDataType getDataType() {
+return PUnsignedTinyint.INSTANCE;
+}
+
+@Override
+public Integer getMaxLength() {
+return 1;
+}
+
+@Override
+public Integer getScale() {
+return null;
+}
+
+@Override
+public SortOrder getSortOrder() {
+return SortOrder.getDefault();
+}
+};
+
 // Does not handle some edge conditions like empty string
 private String handleScanNextKey(String key) {
 char lastChar = key.charAt(key.length() - 1);
@@ -485,6 +515,96 @@ public class ScanRangesIntersectTest {
 
assertEquals(singleKeyToScanRange("BD"),rowKeyRanges.get(3).toString());
 }
 
+@Test
+public void getRowKeyRangesMultipleFieldsSingleSlot() {
+int rowKeySchemaFields = 3;
+RowKeySchema schema = buildSimpleRowKeySchema(rowKeySchemaFields);
+
+int[] slotSpan = new int[1];
+slotSpan[0] = 2;
+
+KeyRange keyRange1 = KeyRange.getKeyRange(stringToByteArray("ABC"));
+KeyRange keyRange2 = KeyRange.getKeyRange(stringToByteArray("DEF"));
+List> ranges = new ArrayList<>();
+ranges.add(Lists.newArrayList(keyRange1,keyRange2));
+
+ScanRanges scanRanges = ScanR
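
The ScanRanges.getRowKeyRanges() change earlier in this commit stops comparing the number of slots against schema.getMaxFields() and instead counts the row-key fields those slots actually cover, since a single slot can span several fields; only the uncovered remainder is padded with EVERYTHING_RANGE entries and extra slot-span slots. The new test above exercises exactly that multi-field, single-slot case. The counting rule, reduced to a standalone sketch:

    // Sketch only: fields covered by a set of slots, where slotSpans[i] is the number
    // of *additional* fields slot i spans beyond its own.
    public class SlotSpanSketch {
        static int coveredFields(int[] slotSpans) {
            int fieldSpan = 0;
            for (int span : slotSpans) {
                fieldSpan += span + 1;   // the slot itself plus the extra fields it spans
            }
            return fieldSpan;
        }

        public static void main(String[] args) {
            // Mirrors the new test: one slot with span 2 covers all 3 key fields,
            // so no EVERYTHING_RANGE padding is needed.
            System.out.println(coveredFields(new int[] { 2 }));   // prints 3
        }
    }
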

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-05-03 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 7ae9737  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
7ae9737 is described below

commit 7ae97373259153e71a656ae8fd9c0185b4237724
Author: Thomas D'Silva 
AuthorDate: Thu May 2 16:33:20 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../org/apache/phoenix/monitoring/PhoenixMetricsIT.java  | 16 
 .../apache/phoenix/iterate/ScanningResultIterator.java   |  5 +++--
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index e00fab3..8f1abf0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -271,6 +271,22 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 conn.createStatement().execute(ddl);
 }
 
+// See PHOENIX-5101
+@Test
+public void testMetricsLargeQuery() throws Exception {
+String tableName = "MY_TABLE";
+String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL 
PRIMARY KEY, V VARCHAR)";
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(ddl);
+long numRows = 18750;
+insertRowsInTable(tableName, numRows);
+String query = "SELECT * FROM " + tableName;
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery(query);
+while (rs.next()) {}
+rs.close();
+}
+
 @Test
 public void testReadMetricsForSelect() throws Exception {
 String tableName = generateUniqueName();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 9a656ee..d86a27a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -72,8 +72,9 @@ public class ScanningResultIterator implements ResultIterator {
 
 @Override
 public void close() throws SQLException {
-getScanMetrics();
+// close the scanner so that metrics are available
 scanner.close();
+updateMetrics();
 }
 
 private void changeMetric(CombinableMetric metric, Long value) {
@@ -88,7 +89,7 @@ public class ScanningResultIterator implements ResultIterator {
 }
 }
 
-private void getScanMetrics() {
+private void updateMetrics() {
 
 if (scanMetricsEnabled && !scanMetricsUpdated) {
 ScanMetrics scanMetrics = scanner.getScanMetrics();
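
The reordering above is the whole fix: close() now closes the HBase scanner first and only then folds its ScanMetrics into the query metrics, because, per the added comment, the metrics only become available once the scanner is closed; reading them earlier is what produced the NPE in the JIRA title. The same ordering in a stripped-down sketch with stand-in types:

    // Sketch only: update metrics after the scanner has been closed.
    public class CloseThenMetricsSketch {
        interface ScannerLike {                 // stand-in for the underlying HBase scanner
            void close();
            Object getScanMetrics();            // may be null before close()
        }

        private boolean metricsUpdated = false;

        public void close(ScannerLike scanner, boolean metricsEnabled) {
            scanner.close();                    // close first so the metrics are available
            if (metricsEnabled && !metricsUpdated) {
                Object scanMetrics = scanner.getScanMetrics();
                if (scanMetrics != null) {
                    // ... merge the per-scan counters into the statement-level metrics here
                }
                metricsUpdated = true;
            }
        }
    }
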



[phoenix] branch master updated: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-05-03 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new bb1327e  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
bb1327e is described below

commit bb1327ef89fb0844094470ada74cbe5071b43a0d
Author: Thomas D'Silva 
AuthorDate: Thu May 2 16:33:20 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../org/apache/phoenix/monitoring/PhoenixMetricsIT.java  | 16 
 .../apache/phoenix/iterate/ScanningResultIterator.java   |  5 +++--
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index e00fab3..8f1abf0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -271,6 +271,22 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 conn.createStatement().execute(ddl);
 }
 
+// See PHOENIX-5101
+@Test
+public void testMetricsLargeQuery() throws Exception {
+String tableName = "MY_TABLE";
+String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL 
PRIMARY KEY, V VARCHAR)";
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(ddl);
+long numRows = 18750;
+insertRowsInTable(tableName, numRows);
+String query = "SELECT * FROM " + tableName;
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery(query);
+while (rs.next()) {}
+rs.close();
+}
+
 @Test
 public void testReadMetricsForSelect() throws Exception {
 String tableName = generateUniqueName();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 9a656ee..d86a27a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -72,8 +72,9 @@ public class ScanningResultIterator implements ResultIterator {
 
 @Override
 public void close() throws SQLException {
-getScanMetrics();
+// close the scanner so that metrics are available
 scanner.close();
+updateMetrics();
 }
 
 private void changeMetric(CombinableMetric metric, Long value) {
@@ -88,7 +89,7 @@ public class ScanningResultIterator implements ResultIterator {
 }
 }
 
-private void getScanMetrics() {
+private void updateMetrics() {
 
 if (scanMetricsEnabled && !scanMetricsUpdated) {
 ScanMetrics scanMetrics = scanner.getScanMetrics();


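The PHOENIX-5101 change above is essentially an ordering change in close(): the scanner is closed first, so its scan metrics are fully populated, and only then are the metrics read and published, exactly once. The sketch below restates that ordering in isolation. The Scanner and MetricsSink types are hypothetical stand-ins rather than the actual Phoenix or HBase classes, so treat it as an illustration of the idea, not the project's implementation.

// Minimal standalone sketch of the close-ordering idea in the PHOENIX-5101 patch above.
// Scanner and MetricsSink are hypothetical stand-ins, not Phoenix/HBase APIs.
import java.util.Collections;
import java.util.Map;

public class CloseOrderingSketch {

    /** Hypothetical scanner whose metrics are only complete after close(). */
    interface Scanner extends AutoCloseable {
        @Override void close();
        /** May return null (or partial data) while the scan is still open. */
        Map<String, Long> getScanMetrics();
    }

    /** Hypothetical consumer of the per-scan metrics. */
    interface MetricsSink {
        void update(Map<String, Long> metrics);
    }

    private final Scanner scanner;
    private final MetricsSink sink;
    private final boolean scanMetricsEnabled;
    private boolean scanMetricsUpdated;

    CloseOrderingSketch(Scanner scanner, MetricsSink sink, boolean scanMetricsEnabled) {
        this.scanner = scanner;
        this.sink = sink;
        this.scanMetricsEnabled = scanMetricsEnabled;
    }

    /** Mirrors the patched close(): close the scanner so metrics are available, then publish them. */
    public void close() {
        scanner.close();
        updateMetrics();
    }

    private void updateMetrics() {
        if (scanMetricsEnabled && !scanMetricsUpdated) {
            Map<String, Long> metrics = scanner.getScanMetrics();
            // Guard against a scanner that still reports nothing, instead of dereferencing null.
            sink.update(metrics == null ? Collections.emptyMap() : metrics);
            scanMetricsUpdated = true;
        }
    }
}

The scanMetricsUpdated flag mirrors the guard already present in ScanningResultIterator, so a second close() call does not publish the same metrics again.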

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-05-03 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new df62802  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
df62802 is described below

commit df62802cb0d53c990821a365223819fc3f170e7c
Author: Thomas D'Silva 
AuthorDate: Thu May 2 16:33:20 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../org/apache/phoenix/monitoring/PhoenixMetricsIT.java  | 16 
 .../apache/phoenix/iterate/ScanningResultIterator.java   |  5 +++--
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 9dddece..74e69f6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -212,6 +212,22 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 conn.createStatement().execute(ddl);
 }
 
+// See PHOENIX-5101
+@Test
+public void testMetricsLargeQuery() throws Exception {
+String tableName = "MY_TABLE";
+String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)";
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(ddl);
+long numRows = 18750;
+insertRowsInTable(tableName, numRows);
+String query = "SELECT * FROM " + tableName;
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery(query);
+while (rs.next()) {}
+rs.close();
+}
+
 @Test
 public void testReadMetricsForSelect() throws Exception {
 String tableName = generateUniqueName();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 1422455..1a3d073 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -77,8 +77,9 @@ public class ScanningResultIterator implements ResultIterator {
 
 @Override
 public void close() throws SQLException {
-getScanMetrics();
+// close the scanner so that metrics are available
 scanner.close();
+updateMetrics();
 }
 
 private void changeMetric(CombinableMetric metric, Long value) {
@@ -93,7 +94,7 @@ public class ScanningResultIterator implements ResultIterator {
 }
 }
 
-private void getScanMetrics() {
+private void updateMetrics() {
 
 if (scanMetricsEnabled && !scanMetricsUpdated) {
 ScanMetrics scanMetrics = scan.getScanMetrics();



[phoenix] branch 4.x-HBase-1.2 updated: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-05-03 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 06f7ccf  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
06f7ccf is described below

commit 06f7ccfc06704e387635cc40fd519dca7b57fde6
Author: Thomas D'Silva 
AuthorDate: Thu May 2 16:33:20 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../org/apache/phoenix/monitoring/PhoenixMetricsIT.java  | 16 
 .../apache/phoenix/iterate/ScanningResultIterator.java   |  5 +++--
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 9dddece..74e69f6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -212,6 +212,22 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 conn.createStatement().execute(ddl);
 }
 
+// See PHOENIX-5101
+@Test
+public void testMetricsLargeQuery() throws Exception {
+String tableName = "MY_TABLE";
+String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)";
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(ddl);
+long numRows = 18750;
+insertRowsInTable(tableName, numRows);
+String query = "SELECT * FROM " + tableName;
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery(query);
+while (rs.next()) {}
+rs.close();
+}
+
 @Test
 public void testReadMetricsForSelect() throws Exception {
 String tableName = generateUniqueName();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 8a1fe5a..f4bbbf1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -77,8 +77,9 @@ public class ScanningResultIterator implements ResultIterator {
 
 @Override
 public void close() throws SQLException {
-getScanMetrics();
+// close the scanner so that metrics are available
 scanner.close();
+updateMetrics();
 }
 
 private void changeMetric(CombinableMetric metric, Long value) {
@@ -93,7 +94,7 @@ public class ScanningResultIterator implements ResultIterator {
 }
 }
 
-private void getScanMetrics() {
+private void updateMetrics() {
 
 if (scanMetricsEnabled && !scanMetricsUpdated) {
 ScanMetrics scanMetrics = scan.getScanMetrics();



[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-05-03 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 27e08f1  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
27e08f1 is described below

commit 27e08f1d7858ccde2888c761e98568070026cfa5
Author: Thomas D'Silva 
AuthorDate: Thu May 2 16:33:20 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../org/apache/phoenix/monitoring/PhoenixMetricsIT.java  | 16 
 .../apache/phoenix/iterate/ScanningResultIterator.java   |  5 +++--
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 0882cec..923673b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -212,6 +212,22 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 conn.createStatement().execute(ddl);
 }
 
+// See PHOENIX-5101
+@Test
+public void testMetricsLargeQuery() throws Exception {
+String tableName = "MY_TABLE";
+String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)";
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(ddl);
+long numRows = 18750;
+insertRowsInTable(tableName, numRows);
+String query = "SELECT * FROM " + tableName;
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery(query);
+while (rs.next()) {}
+rs.close();
+}
+
 @Test
 public void testReadMetricsForSelect() throws Exception {
 String tableName = generateUniqueName();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 8a1fe5a..f4bbbf1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -77,8 +77,9 @@ public class ScanningResultIterator implements ResultIterator {
 
 @Override
 public void close() throws SQLException {
-getScanMetrics();
+// close the scanner so that metrics are available
 scanner.close();
+updateMetrics();
 }
 
 private void changeMetric(CombinableMetric metric, Long value) {
@@ -93,7 +94,7 @@ public class ScanningResultIterator implements ResultIterator {
 }
 }
 
-private void getScanMetrics() {
+private void updateMetrics() {
 
 if (scanMetricsEnabled && !scanMetricsUpdated) {
 ScanMetrics scanMetrics = scan.getScanMetrics();



[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-05-03 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new 3e34ebe  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
3e34ebe is described below

commit 3e34ebee308101564b23bdd227178e28617520ca
Author: Thomas D'Silva 
AuthorDate: Thu May 2 16:33:20 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../org/apache/phoenix/monitoring/PhoenixMetricsIT.java  | 16 
 .../apache/phoenix/iterate/ScanningResultIterator.java   |  5 +++--
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 0882cec..923673b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -212,6 +212,22 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 conn.createStatement().execute(ddl);
 }
 
+// See PHOENIX-5101
+@Test
+public void testMetricsLargeQuery() throws Exception {
+String tableName = "MY_TABLE";
+String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)";
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(ddl);
+long numRows = 18750;
+insertRowsInTable(tableName, numRows);
+String query = "SELECT * FROM " + tableName;
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery(query);
+while (rs.next()) {}
+rs.close();
+}
+
 @Test
 public void testReadMetricsForSelect() throws Exception {
 String tableName = generateUniqueName();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 8a1fe5a..f4bbbf1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -77,8 +77,9 @@ public class ScanningResultIterator implements ResultIterator {
 
 @Override
 public void close() throws SQLException {
-getScanMetrics();
+// close the scanner so that metrics are available
 scanner.close();
+updateMetrics();
 }
 
 private void changeMetric(CombinableMetric metric, Long value) {
@@ -93,7 +94,7 @@ public class ScanningResultIterator implements ResultIterator {
 }
 }
 
-private void getScanMetrics() {
+private void updateMetrics() {
 
 if (scanMetricsEnabled && !scanMetricsUpdated) {
 ScanMetrics scanMetrics = scan.getScanMetrics();



[phoenix] branch 4.14-HBase-1.2 updated: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-05-03 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.2 by this push:
 new f3992ba  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
f3992ba is described below

commit f3992ba7486993982624d534024a2335b3d817a9
Author: Thomas D'Silva 
AuthorDate: Thu May 2 16:33:20 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../org/apache/phoenix/monitoring/PhoenixMetricsIT.java  | 16 
 .../apache/phoenix/iterate/ScanningResultIterator.java   |  5 +++--
 2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 0882cec..923673b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -212,6 +212,22 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 conn.createStatement().execute(ddl);
 }
 
+// See PHOENIX-5101
+@Test
+public void testMetricsLargeQuery() throws Exception {
+String tableName = "MY_TABLE";
+String ddl = "CREATE TABLE " + tableName + " (K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)";
+Connection conn = DriverManager.getConnection(getUrl());
+conn.createStatement().execute(ddl);
+long numRows = 18750;
+insertRowsInTable(tableName, numRows);
+String query = "SELECT * FROM " + tableName;
+Statement stmt = conn.createStatement();
+ResultSet rs = stmt.executeQuery(query);
+while (rs.next()) {}
+rs.close();
+}
+
 @Test
 public void testReadMetricsForSelect() throws Exception {
 String tableName = generateUniqueName();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 8a1fe5a..f4bbbf1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -77,8 +77,9 @@ public class ScanningResultIterator implements ResultIterator {
 
 @Override
 public void close() throws SQLException {
-getScanMetrics();
+// close the scanner so that metrics are available
 scanner.close();
+updateMetrics();
 }
 
 private void changeMetric(CombinableMetric metric, Long value) {
@@ -93,7 +94,7 @@ public class ScanningResultIterator implements ResultIterator {
 }
 }
 
-private void getScanMetrics() {
+private void updateMetrics() {
 
 if (scanMetricsEnabled && !scanMetricsUpdated) {
 ScanMetrics scanMetrics = scan.getScanMetrics();


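The testMetricsLargeQuery test added in each of the commits above scans enough rows that the result iterator is drained and then closed after many underlying scanner batches, which is the close path PHOENIX-5101 describes. A rough standalone equivalent of that test, written with plain JDBC and try-with-resources, is sketched below; the JDBC URL, table name, and row count are placeholder assumptions, and the in-test helpers getUrl() and insertRowsInTable() are replaced with ordinary UPSERTs.

// Standalone sketch only; not part of the committed test code.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;

public class LargeQueryMetricsExample {

    // Placeholder URL; point it at a real Phoenix cluster or a test mini-cluster.
    private static final String JDBC_URL = "jdbc:phoenix:localhost";

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(JDBC_URL)) {
            try (Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE IF NOT EXISTS MY_TABLE "
                        + "(K VARCHAR NOT NULL PRIMARY KEY, V VARCHAR)");
            }

            // Load enough rows that the SELECT below spans several scanner batches.
            try (PreparedStatement upsert =
                         conn.prepareStatement("UPSERT INTO MY_TABLE VALUES (?, ?)")) {
                for (int i = 0; i < 18750; i++) {
                    upsert.setString(1, "key" + i);
                    upsert.setString(2, "value" + i);
                    upsert.executeUpdate();
                }
            }
            conn.commit();

            // Drain and close the result set; closing is where ScanningResultIterator
            // reads the scan metrics after the PHOENIX-5101 ordering change.
            try (Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("SELECT * FROM MY_TABLE")) {
                while (rs.next()) {
                    // nothing to do; reading every row is the point
                }
            }
        }
    }
}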

svn commit: r33702 - in /dev/phoenix: apache-phoenix-4.14.2-HBase-1.2-rc0/ apache-phoenix-4.14.2-HBase-1.2-rc0/bin/ apache-phoenix-4.14.2-HBase-1.2-rc0/src/ apache-phoenix-4.14.2-HBase-1.3-rc0/ apache

2019-04-21 Thread tdsilva
Author: tdsilva
Date: Sun Apr 21 07:40:16 2019
New Revision: 33702

Log:
4.14.1 release

Added:
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin/
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin/apache-phoenix-4.14.2-HBase-1.2-bin.tar.gz   (with props)
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin/apache-phoenix-4.14.2-HBase-1.2-bin.tar.gz.asc
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin/apache-phoenix-4.14.2-HBase-1.2-bin.tar.gz.sha256
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin/apache-phoenix-4.14.2-HBase-1.2-bin.tar.gz.sha512
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/src/
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/src/apache-phoenix-4.14.2-HBase-1.2-src.tar.gz   (with props)
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/src/apache-phoenix-4.14.2-HBase-1.2-src.tar.gz.asc
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/src/apache-phoenix-4.14.2-HBase-1.2-src.tar.gz.sha256
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/src/apache-phoenix-4.14.2-HBase-1.2-src.tar.gz.sha512
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/bin/
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz   (with props)
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.asc
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha256
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/bin/apache-phoenix-4.14.2-HBase-1.3-bin.tar.gz.sha512
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/src/
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz   (with props)
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz.asc
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz.sha256
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.3-rc0/src/apache-phoenix-4.14.2-HBase-1.3-src.tar.gz.sha512
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/bin/
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz   (with props)
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz.asc
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz.sha256
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/bin/apache-phoenix-4.14.2-HBase-1.4-bin.tar.gz.sha512
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/src/
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz   (with props)
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz.asc
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz.sha256
    dev/phoenix/apache-phoenix-4.14.2-HBase-1.4-rc0/src/apache-phoenix-4.14.2-HBase-1.4-src.tar.gz.sha512

Added: dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin/apache-phoenix-4.14.2-HBase-1.2-bin.tar.gz
==============================================================================
Binary file - no diff available.

Propchange: dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin/apache-phoenix-4.14.2-HBase-1.2-bin.tar.gz
------------------------------------------------------------------------------
svn:mime-type = application/octet-stream

Added: dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin/apache-phoenix-4.14.2-HBase-1.2-bin.tar.gz.asc
==============================================================================
--- dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin/apache-phoenix-4.14.2-HBase-1.2-bin.tar.gz.asc (added)
+++ dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin/apache-phoenix-4.14.2-HBase-1.2-bin.tar.gz.asc Sun Apr 21 07:40:16 2019
@@ -0,0 +1,17 @@
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+
+iQIcBAABAgAGBQJcvBfrAAoJEGkcZhzf2GwCmAgP+wWUIHfsMhO6EtRgYzdWIa1z
+eDSxa90pFyEEpgG4aFUr8DUznRzsS6AgI1XTNpfVV23gllTozoGT8Xc5bBFOdMEH
+KCJ1g/J3e5zV5nWQOci+S2tnC3RICz4y5vibjn3qKdBgZpYBx965LTkH+byrhR8x
+LFiNHEqJWlaW7dB0J5Ih2/imKJUcitCRALl9Ubhk1DB2e0X7oBnr84/5GMMxN/BU
+bMMmpbIx+TCNqq206rIz8/P44BRuY9cYZBxm++kOV2PycJGjP0AT0T97JokRGIhH
+HZT9nbCGsfWOHtFW0fa2uHz/Erp3T7CVmDYF/XDMOKdMizgeN8beoI3qhyWbvq2q
+JRF0SaA/fQxguNtsLqxNN0M1biusbE6pUbWoTolBjencID0qlAdybyxasRs88Odc
+dWBmLDevWswbBjkDPKftXMX8ZIqOcV54S2xvRAqn1htKwWJu1L1ba/yf5HDNpMUy
+q9XFTMQthjc5JNqCD3B3bsYP9xoWBFnw3fdGnVUMHaY9FCZH7d8QIdap0V9Cvt6U
+66AdTBn1adHgW/gjYT4qNp8kMRcnTD71ZhL/OUv0xcmeR+6oItYgC40yF+DJTSz9
+Zr8MTSCrMPQgfnMJE6E1YiK0mblXOdKm8q+VmDd4TbcAdLLEMG9mqByZnUGy4iHC
+Ap05w/rF9EsyEwaMQrMz
+=su8c
+-----END PGP SIGNATURE-----

Added: dev/phoenix/apache-phoenix-4.14.2-HBase-1.2-rc0/bin

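Each release candidate tarball staged above is published with .asc, .sha256, and .sha512 companions. As a rough illustration of how the SHA-256 side of that could be checked programmatically: the file name below is taken from the listing, the digest handling is generic Java, and nothing here is Phoenix-specific.

// Standalone sketch; the artifact name is an example from the listing above.
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.DigestInputStream;
import java.security.MessageDigest;

public class ChecksumCheck {
    public static void main(String[] args) throws Exception {
        Path tarball = Path.of("apache-phoenix-4.14.2-HBase-1.2-bin.tar.gz");
        // The first whitespace-separated token of the companion .sha256 file is the expected digest.
        String expected = Files.readString(Path.of(tarball + ".sha256")).trim().split("\\s+")[0];

        MessageDigest md = MessageDigest.getInstance("SHA-256");
        try (InputStream in = new DigestInputStream(Files.newInputStream(tarball), md)) {
            byte[] buf = new byte[8192];
            while (in.read(buf) != -1) {
                // reading the stream drives the digest; nothing else to do
            }
        }
        StringBuilder actual = new StringBuilder();
        for (byte b : md.digest()) {
            actual.append(String.format("%02x", b));
        }
        System.out.println(actual.toString().equalsIgnoreCase(expected) ? "OK" : "MISMATCH");
    }
}

The .asc signature would additionally be verified against the Apache Phoenix KEYS file with a PGP tool, which this sketch does not attempt.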
[phoenix] annotated tag 4.14.2-HBase-1.4-rc0 created (now 84dceea)

2019-04-21 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to annotated tag 4.14.2-HBase-1.4-rc0
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at 84dceea  (tag)
 tagging 6e2e1bed79961a31d9d01db7e53dc481b7ab521c (commit)
 replaces v4.14.1-HBase-1.4
  by Thomas D'Silva
  on Sun Apr 21 00:37:31 2019 -0700

- Log -
4.14.2-HBase-1.4-rc0
---

No new revisions were added by this update.



[phoenix] branch 4.14-HBase-1.4 updated: Set version to 4.14.2-HBase-1.4

2019-04-21 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 6e2e1be  Set version to 4.14.2-HBase-1.4
6e2e1be is described below

commit 6e2e1bed79961a31d9d01db7e53dc481b7ab521c
Author: Thomas D'Silva 
AuthorDate: Sun Apr 21 00:31:25 2019 -0700

Set version to 4.14.2-HBase-1.4
---
 phoenix-assembly/pom.xml   | 2 +-
 phoenix-client/pom.xml | 2 +-
 phoenix-core/pom.xml   | 2 +-
 phoenix-flume/pom.xml  | 2 +-
 phoenix-hive/pom.xml   | 2 +-
 phoenix-kafka/pom.xml  | 2 +-
 phoenix-load-balancer/pom.xml  | 2 +-
 phoenix-pherf/pom.xml  | 2 +-
 phoenix-pig/pom.xml| 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml| 2 +-
 phoenix-server/pom.xml | 2 +-
 phoenix-spark/pom.xml  | 2 +-
 phoenix-tracing-webapp/pom.xml | 2 +-
 pom.xml| 2 +-
 15 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 4c02f9b..d736d8a 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index bcf0022..799eaa2 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
   <artifactId>phoenix-client</artifactId>
   <name>Phoenix Client</name>
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index fe85290..9c48d40 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
   <artifactId>phoenix-core</artifactId>
   <name>Phoenix Core</name>
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index e3fb61e..3a73085 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 9006020..d719eca 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
   <artifactId>phoenix-hive</artifactId>
   <name>Phoenix - Hive</name>
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 83aa35a..47c623e 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
   <artifactId>phoenix-kafka</artifactId>
   <name>Phoenix - Kafka</name>
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index 59376b3..d19ec8d 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
  </parent>
   <artifactId>phoenix-load-balancer</artifactId>
   <name>Phoenix Load Balancer</name>
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index ec8d028..8aa4da8 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
 
   <artifactId>phoenix-pherf</artifactId>
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 1d5fa06..f2e79d9 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
   <artifactId>phoenix-pig</artifactId>
   <name>Phoenix - Pig</name>
diff --git a/phoenix-queryserver-client/pom.xml b/phoenix-queryserver-client/pom.xml
index 0f60746..78b5204 100644
--- a/phoenix-queryserver-client/pom.xml
+++ b/phoenix-queryserver-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
   <artifactId>phoenix-queryserver-client</artifactId>
   <name>Phoenix Query Server Client</name>
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index 2166633..4074cce 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
   <artifactId>phoenix-queryserver</artifactId>
   <name>Phoenix Query Server</name>
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 5a84acc..6301061 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.4</version>
+    <version>4.14.2-HBase-1.4</version>
   </parent>
   <artifactId>phoenix-server</artifactId>
   <name>Phoenix Server</name>
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 8cf71b0..f87a0ee 100644
