[phoenix] branch 4.x-cdh5.15 deleted (was 4bea60d)

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a change to branch 4.x-cdh5.15
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


 was 4bea60d  PHOENIX-5037 Fix maven site reporting warnings on build

The revisions that were on this branch are still contained in
other references; therefore, this change does not discard any commits
from the repository.



[phoenix] 12/18: PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 3f17a89e6c70b8f47023175e585e216dcfec5731
Author: Kadir 
AuthorDate: Thu Dec 20 19:38:44 2018 +

PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping
---
 .../phoenix/end2end/DropTableWithViewsIT.java  | 56 --
 1 file changed, 30 insertions(+), 26 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
index 9502218..a4cd354 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
@@ -20,7 +20,6 @@ package org.apache.phoenix.end2end;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -30,14 +29,16 @@ import java.util.Collection;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.phoenix.coprocessor.TableViewFinderResult;
+import org.apache.phoenix.coprocessor.TaskRegionObserver;
 import org.apache.phoenix.coprocessor.ViewFinder;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.SchemaUtil;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -50,6 +51,20 @@ public class DropTableWithViewsIT extends SplitSystemCatalogIT {
 private final boolean columnEncoded;
 private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + TENANT_ID_ATTRIB + "=" + TENANT1;
 
+private static RegionCoprocessorEnvironment TaskRegionEnvironment;
+
+@BeforeClass
+public static void doSetup() throws Exception {
+SplitSystemCatalogIT.doSetup();
+TaskRegionEnvironment =
+getUtility()
+.getRSForFirstRegionInTable(
+PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+.getRegions(PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+.get(0).getCoprocessorHost()
+.findCoprocessorEnvironment(TaskRegionObserver.class.getName());
+}
+
+
 public DropTableWithViewsIT(boolean isMultiTenant, boolean columnEncoded) {
 this.isMultiTenant = isMultiTenant;
 this.columnEncoded = columnEncoded;
@@ -108,30 +123,19 @@ public class DropTableWithViewsIT extends SplitSystemCatalogIT {
 // Drop the base table
 String dropTable = String.format("DROP TABLE IF EXISTS %s CASCADE", baseTable);
 conn.createStatement().execute(dropTable);
-
-// Wait for the tasks for dropping child views to complete. The depth of the view tree is 2, so we expect that
-// this will be done in two task handling runs, i.e., in three task handling intervals at most in general
-// by assuming that each non-root level will be processed in one interval. To be on the safe side, we will
-// wait at most 10 intervals.
-long halfTimeInterval = config.getLong(QueryServices.TASK_HANDLING_INTERVAL_MS_ATTRIB,
-QueryServicesOptions.DEFAULT_TASK_HANDLING_INTERVAL_MS)/2;
-ResultSet rs = null;
-boolean timedOut = true;
-Thread.sleep(3 * halfTimeInterval);
-for (int i = 3; i < 20; i++) {
-rs = conn.createStatement().executeQuery("SELECT * " +
-" FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME +
-" WHERE " + PhoenixDatabaseMetaData.TASK_TYPE + " = " +
-PTable.TaskType.DROP_CHILD_VIEWS.getSerializedValue());
-Thread.sleep(halfTimeInterval);
-if (!rs.next()) {
-timedOut = false;
-break;
-}
-}
-if (timedOut) {
-fail("Drop child view task execution timed out!");
-}
+// Run DropChildViewsTask to complete the tasks for dropping child views. The depth of the view tree is 2,
+// so we expect that this will be done in two task handling runs as each non-root level will be processed
+
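
For context, the flap came from the time-based wait removed above. A minimal sketch of the deterministic replacement inside the test, assuming TaskRegionObserver exposes its task-handling runnable through the captured TaskRegionEnvironment (the SelfHealingTask name and constructor are assumptions here; the SELECT is the one from the removed code):

// Sketch only: run the task handler on demand instead of sleeping.
// SelfHealingTask and its constructor signature are assumptions.
TaskRegionObserver.SelfHealingTask task = new TaskRegionObserver.SelfHealingTask(
        TaskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
task.run(); // first run drops the first level of child views
task.run(); // second run drops the next level (the view tree depth is 2)

// Verify no DROP_CHILD_VIEWS tasks remain in SYSTEM.TASK.
ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM " +
        PhoenixDatabaseMetaData.SYSTEM_TASK_NAME + " WHERE " +
        PhoenixDatabaseMetaData.TASK_TYPE + " = " +
        PTable.TaskType.DROP_CHILD_VIEWS.getSerializedValue());
assertFalse(rs.next());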

[phoenix] 13/18: PHOENIX-5074; fix compilation failure.

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 82172a167de8adb709374b03c9a43bc1dc494e74
Author: Lars Hofhansl 
AuthorDate: Tue Dec 25 10:21:35 2018 +

PHOENIX-5074; fix compilation failure.
---
 .../src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
index a4cd354..6aaf703 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
@@ -57,10 +57,10 @@ public class DropTableWithViewsIT extends SplitSystemCatalogIT {
 public static void doSetup() throws Exception {
 SplitSystemCatalogIT.doSetup();
 TaskRegionEnvironment =
-getUtility()
+(RegionCoprocessorEnvironment)getUtility()
 .getRSForFirstRegionInTable(
 PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
-.getRegions(PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
+.getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
 .get(0).getCoprocessorHost()
 .findCoprocessorEnvironment(TaskRegionObserver.class.getName());
 }



[phoenix] 18/18: PHOENIX-5059 Use the Datasource v2 api in the spark connector

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit b129be998de7eac2bd8b3bf78f5feb7812b8f642
Author: Thomas D'Silva 
AuthorDate: Tue Dec 11 22:59:39 2018 +

PHOENIX-5059 Use the Datasource v2 api in the spark connector
---
 .../phoenix/end2end/salted/BaseSaltedTableIT.java  |   6 +-
 phoenix-spark/pom.xml  |   8 +
 .../java/org/apache/phoenix/spark/OrderByIT.java   | 117 ++---
 .../java/org/apache/phoenix/spark/SparkUtil.java   |  45 +-
 phoenix-spark/src/it/resources/globalSetup.sql |   6 +-
 .../phoenix/spark/AbstractPhoenixSparkIT.scala |  12 +-
 .../org/apache/phoenix/spark/PhoenixSparkIT.scala  | 543 +++--
 .../spark/PhoenixSparkITTenantSpecific.scala   |  18 +-
 .../spark/datasource/v2/PhoenixDataSource.java |  82 
 .../v2/reader/PhoenixDataSourceReadOptions.java|  51 ++
 .../v2/reader/PhoenixDataSourceReader.java | 201 
 .../v2/reader/PhoenixInputPartition.java   |  44 ++
 .../v2/reader/PhoenixInputPartitionReader.java | 168 +++
 .../v2/writer/PhoenixDataSourceWriteOptions.java   | 109 +
 .../datasource/v2/writer/PhoenixDataWriter.java| 100 
 .../v2/writer/PhoenixDataWriterFactory.java|  19 +
 .../v2/writer/PhoenixDatasourceWriter.java |  34 ++
 ...org.apache.spark.sql.sources.DataSourceRegister |   1 +
 .../apache/phoenix/spark/ConfigurationUtil.scala   |   1 +
 .../apache/phoenix/spark/DataFrameFunctions.scala  |   2 +-
 .../org/apache/phoenix/spark/DefaultSource.scala   |   1 +
 ...lation.scala => FilterExpressionCompiler.scala} | 109 ++---
 .../org/apache/phoenix/spark/PhoenixRDD.scala  |  61 +--
 .../phoenix/spark/PhoenixRecordWritable.scala  |   2 +-
 .../org/apache/phoenix/spark/PhoenixRelation.scala |  70 +--
 .../apache/phoenix/spark/ProductRDDFunctions.scala |   1 +
 .../phoenix/spark/SparkContextFunctions.scala  |   1 +
 .../org/apache/phoenix/spark/SparkSchemaUtil.scala |  84 
 .../phoenix/spark/SparkSqlContextFunctions.scala   |   1 +
 .../datasources/jdbc/PhoenixJdbcDialect.scala  |  21 +
 .../execution/datasources/jdbc/SparkJdbcUtil.scala | 309 
 31 files changed, 1664 insertions(+), 563 deletions(-)
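
The practical effect of this commit is that the connector is now registered as a DataSource v2 short name (see the DataSourceRegister services file in the list above). A minimal read sketch; the option names "table" and "zkUrl" follow the connector's conventions but should be treated as assumptions here, and the table name and quorum are placeholders:

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class PhoenixDataSourceV2ReadSketch {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().appName("phoenix-v2-read").getOrCreate();
        // "phoenix" resolves through the DataSourceRegister entry added in this commit.
        Dataset<Row> df = spark.read()
                .format("phoenix")
                .option("table", "TABLE1")          // assumed option name
                .option("zkUrl", "zkhost:2181")     // assumed option name
                .load();
        df.show();
    }
}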

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
index 3051cd6..ef127ac 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/salted/BaseSaltedTableIT.java
@@ -194,7 +194,7 @@ public abstract class BaseSaltedTableIT extends ParallelStatsDisabledIT {
 .setSelectColumns(
 Lists.newArrayList("A_INTEGER", "A_STRING", "A_ID", "B_STRING", "B_INTEGER"))
 .setFullTableName(tableName)
-.setWhereClause("a_integer = 1 AND a_string >= 'ab' AND a_string < 'de' AND a_id = '123'");
+.setWhereClause("A_INTEGER = 1 AND A_STRING >= 'ab' AND A_STRING < 'de' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
@@ -205,7 +205,7 @@ public abstract class BaseSaltedTableIT extends ParallelStatsDisabledIT {
 assertFalse(rs.next());
 
 // all single slots with one value.
-queryBuilder.setWhereClause("a_integer = 1 AND a_string = 'ab' AND a_id = '123'");
+queryBuilder.setWhereClause("A_INTEGER = 1 AND A_STRING = 'ab' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 assertTrue(rs.next());
 assertEquals(1, rs.getInt(1));
@@ -216,7 +216,7 @@ public abstract class BaseSaltedTableIT extends ParallelStatsDisabledIT {
 assertFalse(rs.next());
 
 // all single slots with multiple values.
-queryBuilder.setWhereClause("a_integer in (2, 4) AND a_string = 'abc' AND a_id = '123'");
+queryBuilder.setWhereClause("A_INTEGER in (2, 4) AND A_STRING = 'abc' AND A_ID = '123'");
 rs = executeQuery(conn, queryBuilder);
 
 assertTrue(rs.next());
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index e2790bd..9cc3c3d 100644
--- a/phoenix-spark/pom.xml
+++ b/phoenix-spark/pom.xml
@@ -487,6 +487,14 @@
 src/it/scala
 
src/it/resources
 
+<plugin>
+<groupId>org.apache.maven.plugins</groupId>
+<artifactId>maven-compiler-plugin</artifactId>
+<configuration>
+<source>1.8</source>
+<target>1.8</target>
+</configuration>
+</plugin>
   
 org.codehaus.mojo
 build-helper-maven-plugin
diff --git a/phoenix-spark/src/it/java/org/ap

[phoenix] 17/18: Changes for CDH 5.16.x

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit cea84e84d5b294bc1205a9b673037984b700ce63
Author: pboado 
AuthorDate: Tue May 28 23:45:56 2019 +0100

Changes for CDH 5.16.x
---
 phoenix-assembly/pom.xml   |  2 +-
 phoenix-client/pom.xml |  2 +-
 phoenix-core/pom.xml   |  2 +-
 phoenix-flume/pom.xml  |  2 +-
 phoenix-hive/pom.xml   |  2 +-
 phoenix-kafka/pom.xml  |  2 +-
 phoenix-load-balancer/pom.xml  |  2 +-
 phoenix-parcel/pom.xml |  2 +-
 phoenix-pherf/pom.xml  |  2 +-
 phoenix-pig/pom.xml|  2 +-
 phoenix-queryserver-client/pom.xml |  2 +-
 phoenix-queryserver/pom.xml|  2 +-
 phoenix-server/pom.xml |  2 +-
 phoenix-spark/pom.xml  |  2 +-
 phoenix-tracing-webapp/pom.xml |  2 +-
 pom.xml| 10 +-
 16 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 300b4f6..5c2aeb5 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index cfed3ce..3028c81 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
   </parent>
   <artifactId>phoenix-client</artifactId>
   <name>Phoenix Client</name>
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 8caf88f..043505a 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
  <parent>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
  </parent>
  <artifactId>phoenix-core</artifactId>
  <name>Phoenix Core</name>
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index c67de23..5711714 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
  <parent>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
  </parent>
  <artifactId>phoenix-flume</artifactId>
  <name>Phoenix - Flume</name>
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 50670e0..8af7c16 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
  <parent>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
  </parent>
  <artifactId>phoenix-hive</artifactId>
  <name>Phoenix - Hive</name>
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 460eb5a..6da5a58 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
  <parent>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
  </parent>
  <artifactId>phoenix-kafka</artifactId>
  <name>Phoenix - Kafka</name>
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index a8319e9..a59ee06 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
  <parent>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
  </parent>
  <artifactId>phoenix-load-balancer</artifactId>
  <name>Phoenix Load Balancer</name>
diff --git a/phoenix-parcel/pom.xml b/phoenix-parcel/pom.xml
index 417a2db..eb2f254 100644
--- a/phoenix-parcel/pom.xml
+++ b/phoenix-parcel/pom.xml
@@ -27,7 +27,7 @@
  <parent>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
  </parent>
  <artifactId>phoenix-parcel</artifactId>
  <name>Phoenix Parcels for CDH</name>
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index cb648e4..340bb58 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7 @@
  <parent>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
  </parent>
 
  <artifactId>phoenix-pherf</artifactId>
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 48ffb91..8f96d6f 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
  <parent>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
  </parent>
  <artifactId>phoenix-pig</artifactId>
  <name>Phoenix - Pig</name>
diff --git a/phoenix-queryserver-client/pom.xml b/phoenix-queryserver-client/pom.xml
index a87d338..ea386d7 100644
--- a/phoenix-queryserver-client/pom.xml
+++ b/phoenix-queryserver-client/pom.xml
@@ -27,7 +27,7 @@
  <parent>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
  </parent>
  <artifactId>phoenix-queryserver-client</artifactId>
  <name>Phoenix Query Server Client</name>
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index f91fce5..0a19b6d 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -26,7 +26,7 @@
  <parent>
    <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.15.0-cdh5.15.1</version>
+    <version>4.15.0-cdh5.16.2</version>
  </parent>
  <artifactId>phoenix-queryserver</artifactId>
  <name>Phoenix Query Server</name>
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index def100c..18a5ab9 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -27,7 +27,7

[phoenix] 11/18: [PHOENIX-3623] Integrate Omid with Phoenix.

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 81f850311c4b03cb900a9dea079ee311d9c685fd
Author: Ohad Shacham 
AuthorDate: Thu Dec 20 12:15:03 2018 +

[PHOENIX-3623] Integrate Omid with Phoenix.

This commit finishes the integration of Omid as a Phoenix transaction processor engine.
More information regarding the integration exists at [PHOENIX-3623] and at [OMID-82], which is the corresponding jira in Omid.
---
 bin/omid-env.sh|  43 
 bin/omid-server-configuration.yml  |  25 +++
 bin/omid.sh|  93 +
 phoenix-assembly/pom.xml   |   5 +
 .../build/components/all-common-dependencies.xml   |  28 +++
 phoenix-core/pom.xml   |  46 +
 .../phoenix/coprocessor/OmidGCProcessor.java   |   7 +-
 .../coprocessor/OmidTransactionalProcessor.java|   8 +-
 .../transaction/OmidTransactionContext.java| 217 -
 .../transaction/OmidTransactionProvider.java   | 106 +-
 .../phoenix/transaction/OmidTransactionTable.java  |  64 +-
 .../phoenix/transaction/TransactionFactory.java|   5 +-
 .../phoenix/query/QueryServicesTestImpl.java   |   1 -
 phoenix-server/pom.xml |   1 +
 pom.xml|  47 +
 15 files changed, 665 insertions(+), 31 deletions(-)

diff --git a/bin/omid-env.sh b/bin/omid-env.sh
new file mode 100644
index 000..820cdaa
--- /dev/null
+++ b/bin/omid-env.sh
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set the flags to pass to the jvm when running omid
+# export JVM_FLAGS=-Xmx8096m
+# 
-
+# Check if HADOOP_CONF_DIR and HBASE_CONF_DIR are set
+# 
-
+export JVM_FLAGS=-Xmx4096m
+if [ -z ${HADOOP_CONF_DIR+x} ]; then
+if [ -z ${HADOOP_HOME+x} ]; then
+echo "WARNING: HADOOP_HOME or HADOOP_CONF_DIR are unset";
+else
+export HADOOP_CONF_DIR=${HADOOP_HOME}/conf
+fi
+else
+echo "HADOOP_CONF_DIR is set to '$HADOOP_CONF_DIR'";
+fi
+
+if [ -z ${HBASE_CONF_DIR+x} ]; then
+if [ -z ${HBASE_HOME+x} ]; then
+echo "WARNING: HBASE_HOME or HBASE_CONF_DIR are unset";
+else
+export HBASE_CONF_DIR=${HBASE_HOME}/conf
+fi
+else
+echo "HBASE_CONF_DIR is set to '$HBASE_CONF_DIR'";
+fi
diff --git a/bin/omid-server-configuration.yml b/bin/omid-server-configuration.yml
new file mode 100644
index 000..8d1616e
--- /dev/null
+++ b/bin/omid-server-configuration.yml
@@ -0,0 +1,25 @@
+# =====================================================================
+#
+# Omid TSO Server Configuration
+# ---------------------------------------------------------------------
+#
+# Tune here the default values for TSO server config parameters found in 'default-omid-server-configuration.yml' file
+#
+# =====================================================================
+
+
+timestampStoreModule: !!org.apache.omid.timestamp.storage.HBaseTimestampStorageModule [ ]
+commitTableStoreModule: !!org.apache.omid.committable.hbase.HBaseCommitTableStorageModule [ ]
+
+metrics: !!org.apache.omid.metrics.CodahaleMetricsProvider [
+!!org.apache.omid.metrics.CodahaleMetricsConfig {
+  outputFreqInSecs: 10,
+  reporters: !!set {
+!!org.apache.omid.metrics.CodahaleMetricsConfig$Reporter CSV
+  },
+  csvDir: "csvMetrics",
+}
+]
+
+timestampType: WORLD_TIME
+lowLatency: false
diff --git a/bin/omid.sh b/bin/omid.sh
new file mode 100755
index 000..5b33ed5
--- /dev/null
+++ b/bin/omid.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foun

[phoenix] 16/18: PHOENIX-5055 Split mutations batches probably affects correctness of index data

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 0e5a2635ea023d72459e63bd6443f3733642482b
Author: jaanai 
AuthorDate: Sat Jan 5 13:17:42 2019 +

PHOENIX-5055 Split mutations batches probably affects correctness of index data
---
 .../apache/phoenix/end2end/MutationStateIT.java| 47 +-
 .../org/apache/phoenix/end2end/QueryMoreIT.java|  6 +--
 .../org/apache/phoenix/execute/MutationState.java  | 41 ++-
 .../apache/phoenix/execute/MutationStateTest.java  | 41 +++
 4 files changed, 122 insertions(+), 13 deletions(-)
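
The invariant behind this fix: when a commit's mutation list is split into batches of phoenix.mutate.batchSize rows, all mutations for one row key must stay in the same batch, or the data row and its index updates can be written with different timestamps (which the new IT below asserts). A standalone sketch of that grouping rule, not the actual MutationState code:

import java.util.ArrayList;
import java.util.List;

public class RowAwareBatchSplitter {
    // Each inner list holds all mutations for a single row; a row group is
    // never split across batches, even when it overflows the target size.
    static <M> List<List<M>> split(List<List<M>> perRowGroups, int batchSize) {
        List<List<M>> batches = new ArrayList<>();
        List<M> current = new ArrayList<>();
        for (List<M> rowGroup : perRowGroups) {
            if (!current.isEmpty() && current.size() + rowGroup.size() > batchSize) {
                batches.add(current);
                current = new ArrayList<>();
            }
            current.addAll(rowGroup);
        }
        if (!current.isEmpty()) {
            batches.add(current);
        }
        return batches;
    }
}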

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
index 36782c1..5a5fb56 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
@@ -25,8 +25,14 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.Iterator;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -157,5 +163,44 @@ public class MutationStateIT extends ParallelStatsDisabledIT {
 stmt.execute();
 assertTrue("Mutation state size should decrease", prevEstimatedSize+4 > state.getEstimatedSize());
 }
-
+
+@Test
+public void testSplitMutationsIntoSameGroupForSingleRow() throws Exception {
+String tableName = "TBL_" + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+Properties props = new Properties();
+props.put("phoenix.mutate.batchSize", "2");
+try (PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class)) {
+conn.setAutoCommit(false);
+conn.createStatement().executeUpdate(
+"CREATE TABLE "  + tableName + " ("
++ "A VARCHAR NOT NULL PRIMARY KEY,"
++ "B VARCHAR,"
++ "C VARCHAR,"
++ "D VARCHAR) COLUMN_ENCODED_BYTES = 0");
+conn.createStatement().executeUpdate("CREATE INDEX " + indexName + " on "  + tableName + " (C) INCLUDE(D)");
+
+conn.createStatement().executeUpdate("UPSERT INTO "  + tableName + "(A,B,C,D) VALUES ('A2','B2','C2','D2')");
+conn.createStatement().executeUpdate("UPSERT INTO "  + tableName + "(A,B,C,D) VALUES ('A3','B3', 'C3', null)");
+conn.commit();
+
+Table htable = conn.getQueryServices().getTable(Bytes.toBytes(tableName));
+Scan scan = new Scan();
+scan.setRaw(true);
+Iterator<Result> scannerIter = htable.getScanner(scan).iterator();
+while (scannerIter.hasNext()) {
+long ts = -1;
+Result r = scannerIter.next();
+for (Cell cell : r.listCells()) {
+if (ts == -1) {
+ts = cell.getTimestamp();
+} else {
+assertEquals("(" + cell.toString() + ") has different ts", ts, cell.getTimestamp());
+}
+}
+}
+htable.close();
+}
+}
+
 }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 2b1d31e..7c45f1a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -493,14 +493,14 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 connection.commit();
 assertEquals(2L, connection.getMutationState().getBatchCount());
 
-// set the batch size (rows) to 1
-connectionProperties.setProperty(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, "1");
+// set the batch size (rows) to 2 since there are at least 2 mutations when updating a single row
+connectionProperties.setProperty(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, "2");
 connectionProperties.setProperty(QueryServices.MUTATE_BATCH_SIZE_BY

[phoenix] 15/18: PHOENIX-4820 Optimize OrderBy for ClientAggregatePlan

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 460da6136a75245b119d4f0393e08e9f61d579d5
Author: chenglei 
AuthorDate: Sat Jan 5 01:58:00 2019 +

PHOENIX-4820 Optimize OrderBy for ClientAggregatePlan
---
 .../org/apache/phoenix/end2end/AggregateIT.java| 104 +++
 .../apache/phoenix/compile/GroupByCompiler.java|   8 +-
 .../apache/phoenix/compile/OrderByCompiler.java|  18 +-
 .../phoenix/compile/OrderPreservingTracker.java|  53 ++--
 .../org/apache/phoenix/compile/QueryCompiler.java  |  12 +-
 .../org/apache/phoenix/compile/RowProjector.java   |  15 +-
 .../phoenix/expression/BaseCompoundExpression.java |  11 +-
 .../apache/phoenix/expression/BaseExpression.java  |  11 +
 .../phoenix/expression/BaseSingleExpression.java   |   5 +
 .../phoenix/expression/DelegateExpression.java |   5 +
 .../org/apache/phoenix/expression/Expression.java  |   6 +
 .../expression/ProjectedColumnExpression.java  |   8 +-
 .../expression/function/RandomFunction.java|   5 +
 .../expression/visitor/CloneExpressionVisitor.java |   6 +-
 .../CloneNonDeterministicExpressionVisitor.java|  31 --
 .../org/apache/phoenix/util/ExpressionUtil.java| 160 +-
 .../apache/phoenix/compile/QueryCompilerTest.java  | 324 +
 .../expression/ArithmeticOperationTest.java|   8 +-
 18 files changed, 705 insertions(+), 85 deletions(-)
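
The shape of query this optimization targets, distilled from the new test below: an aggregate over a derived table whose ORDER BY can be proven compatible with the GROUP BY output order, letting ClientAggregatePlan skip a separate client-side sort. A JDBC fragment, assuming an open Phoenix connection "conn" and a hypothetical table T(pk1, pk2, pk3, v1, v2):

String sql = "select a.ak3 " +
        "from (select pk1 ak1, pk2 ak2, pk3 ak3, substr(v1,1,1) av1 from T order by pk2, pk3 limit 10) a " +
        "group by a.ak3, a.av1 order by a.ak3 desc, a.av1";
ResultSet rs = conn.prepareStatement(sql).executeQuery();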

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
index 8916d4d..d52025e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AggregateIT.java
@@ -227,5 +227,109 @@ public class AggregateIT extends BaseAggregateIT {
 assertEquals(4, rs.getLong(1));
 }
 }
+
+@Test
+public void testOrderByOptimizeForClientAggregatePlanBug4820() throws Exception {
+doTestOrderByOptimizeForClientAggregatePlanBug4820(false,false);
+doTestOrderByOptimizeForClientAggregatePlanBug4820(false,true);
+doTestOrderByOptimizeForClientAggregatePlanBug4820(true,false);
+doTestOrderByOptimizeForClientAggregatePlanBug4820(true,true);
+}
+
+private void doTestOrderByOptimizeForClientAggregatePlanBug4820(boolean desc, boolean salted) throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = null;
+try {
+conn = DriverManager.getConnection(getUrl(), props);
+String tableName = generateUniqueName();
+String sql = "create table " + tableName + "( "+
+" pk1 varchar not null , " +
+" pk2 varchar not null, " +
+" pk3 varchar not null," +
+" v1 varchar, " +
+" v2 varchar, " +
+" CONSTRAINT TEST_PK PRIMARY KEY ( "+
+"pk1 "+(desc ? "desc" : "")+", "+
+"pk2 "+(desc ? "desc" : "")+", "+
+"pk3 "+(desc ? "desc" : "")+
+" )) "+(salted ? "SALT_BUCKETS =4" : "split on('b')");
+conn.createStatement().execute(sql);
+
+conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES ('a11','a12','a13','a14','a15')");
+conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES ('a21','a22','a23','a24','a25')");
+conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES ('a31','a32','a33','a34','a35')");
+conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES ('b11','b12','b13','b14','b15')");
+conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES ('b21','b22','b23','b24','b25')");
+conn.createStatement().execute("UPSERT INTO "+tableName+" VALUES ('b31','b32','b33','b34','b35')");
+conn.commit();
+
+sql = "select a.ak3 "+
+"from (select pk1 ak1,pk2 ak2,pk3 ak3, substr(v1,1,1) av1,substr(v2,1,1) av2 from "+tableName+" order by pk2,pk3 limit 10) a "+
+"group by a.ak3,a.av1 order by a.ak3 desc,a.av1";
+ResultSet rs = conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{"b33"},{"b23"},{"b13"},{"a33"},{"a23"},{"a13"}});
+
+

[phoenix] 02/18: ScanningResultIterator metric RowsScanned not set. PHOENIX-5051

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit c360f87cf14c2540137798480cd3d70a933ebbbf
Author: chfeng 
AuthorDate: Wed Dec 5 02:40:29 2018 +

ScanningResultIterator metric RowsScanned not set. PHOENIX-5051
---
 .../main/java/org/apache/phoenix/iterate/ScanningResultIterator.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index f02e9d3..893eaa2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -117,7 +117,7 @@ public class ScanningResultIterator implements ResultIterator {
 scanMetricsMap.get(RPC_RETRIES_METRIC_NAME));
 changeMetric(scanMetricsHolder.getCountOfRemoteRPCRetries(),
 scanMetricsMap.get(REMOTE_RPC_RETRIES_METRIC_NAME));
-changeMetric(scanMetricsHolder.getCountOfRowsFiltered(),
+changeMetric(scanMetricsHolder.getCountOfRowsScanned(),
 scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME));
 changeMetric(scanMetricsHolder.getCountOfRowsFiltered(),
 scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME));



[phoenix] 08/18: PHOENIX-4983: Added missing apache license header.

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit f530f94659bcb337c8adce997ac7696431c719e2
Author: s.kadam 
AuthorDate: Fri Dec 14 16:04:29 2018 +

PHOENIX-4983: Added missing apache license header.
---
 .../org/apache/phoenix/end2end/UpsertWithSCNIT.java | 17 +
 1 file changed, 17 insertions(+)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
index 6f231ff..40bb883 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.end2end;
 
 import org.apache.phoenix.exception.SQLExceptionCode;



[phoenix] 05/18: PHOENIX-5025 Tool to clean up orphan views

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit dff179b6c184bfeb4d28c090241cf08577ec4d85
Author: Kadir 
AuthorDate: Tue Nov 13 06:24:10 2018 +

PHOENIX-5025 Tool to clean up orphan views
---
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 472 +++
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 879 +
 2 files changed, 1351 insertions(+)
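
A sketch of invoking the tool from code, based on the flags exercised by OrphanViewToolIT below (-i identify only, -op output directory, -a minimum age in milliseconds; other flags are not shown in this excerpt):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;
import org.apache.phoenix.mapreduce.OrphanViewTool;

public class RunOrphanViewTool {
    public static void main(String[] args) throws Exception {
        // Identify orphan views older than one minute; write findings under /tmp/.
        int rc = ToolRunner.run(new Configuration(), new OrphanViewTool(),
                new String[] { "-i", "-op", "/tmp/", "-a", "60000" });
        System.exit(rc);
    }
}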

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
new file mode 100644
index 000..f9a1785
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -0,0 +1,472 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.mapreduce.OrphanViewTool;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.AfterClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@RunWith(Parameterized.class)
+public class OrphanViewToolIT extends ParallelStatsDisabledIT {
+private static final Logger LOG = LoggerFactory.getLogger(OrphanViewToolIT.class);
+
+private final boolean isMultiTenant;
+private final boolean columnEncoded;
+
+private static final long fanout = 2;
+private static final long childCount = fanout;
+private static final long grandChildCount = fanout * fanout;
+private static final long grandGrandChildCount = fanout * fanout * fanout;
+
+private static final String filePath = "/tmp/";
+private static final String viewFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.VIEW];
+private static final String physicalLinkFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.PHYSICAL_TABLE_LINK];
+private static final String parentLinkFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.PARENT_TABLE_LINK];
+private static final String childLinkFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.CHILD_TABLE_LINK];
+
+protected static String SCHEMA1 = "SCHEMA1";
+protected static String SCHEMA2 = "SCHEMA2";
+protected static String SCHEMA3 = "SCHEMA3";
+protected static String SCHEMA4 = "SCHEMA4";
+
+private final String TENANT_SPECIFIC_URL = getUrl() + ';' + TENANT_ID_ATTRIB + "=tenant";
+
+private static final String createBaseTableFirstPartDDL = "CREATE TABLE IF NOT EXISTS %s";
+private static final String createBaseTableSecondPartDDL = "(%s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR " +
+" CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)) %s";
+private static final String deleteTableRows = "DELETE FROM " + SYSTEM_CATALOG_NAME +
+" WHERE "

[phoenix] 01/18: PHOENIX-4781 Create artifact jar so that shaded jar replaces it properly

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit de4e0416017ae27f78f9cb1bf98f09b88d844cfb
Author: Vincent Poon 
AuthorDate: Sat Dec 1 01:55:34 2018 +

PHOENIX-4781 Create artifact jar so that shaded jar replaces it properly
---
 phoenix-client/pom.xml | 9 +++--
 phoenix-server/pom.xml | 9 +++--
 2 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 83c7ad9..cfed3ce 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -57,12 +57,9 @@
   <plugin>
     <groupId>org.apache.maven.plugins</groupId>
     <artifactId>maven-jar-plugin</artifactId>
-    <executions>
-      <execution>
-        <id>default-jar</id>
-        <phase>none</phase>
-      </execution>
-    </executions>
+    <configuration>
+      <finalName>phoenix-${project.version}-client</finalName>
+    </configuration>
   </plugin>
   <plugin>
     <groupId>org.apache.maven.plugins</groupId>
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 648e4d1..e6a7afe 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -61,12 +61,9 @@
   <plugin>
     <groupId>org.apache.maven.plugins</groupId>
     <artifactId>maven-jar-plugin</artifactId>
-    <executions>
-      <execution>
-        <id>default-jar</id>
-        <phase>none</phase>
-      </execution>
-    </executions>
+    <configuration>
+      <finalName>phoenix-${project.version}-server</finalName>
+    </configuration>
   </plugin>
   <plugin>
     <groupId>org.apache.maven.plugins</groupId>



[phoenix] 04/18: PHOENIX-4763: Changing a base table property value should be reflected in child views (if the property wasn't changed)
2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 622fcf4802d83316111fd19203723e74f843f67d
Author: Chinmay Kulkarni 
AuthorDate: Mon Dec 10 05:07:41 2018 +

PHOENIX-4763: Changing a base table property value should be reflected in child views (if the property wasn't changed)
---
 .../phoenix/end2end/AlterTableWithViewsIT.java | 117 +--
 .../end2end/ExplainPlanWithStatsEnabledIT.java |   8 +-
 .../apache/phoenix/end2end/PropertiesInSyncIT.java |   6 +-
 .../IndexHalfStoreFileReaderGenerator.java |   3 +-
 .../org/apache/phoenix/compile/DeleteCompiler.java |   2 +-
 .../org/apache/phoenix/compile/JoinCompiler.java   |   2 +-
 .../phoenix/compile/TupleProjectionCompiler.java   |   3 +-
 .../org/apache/phoenix/compile/UpsertCompiler.java |   2 +-
 .../org/apache/phoenix/compile/WhereOptimizer.java |   3 +-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  | 134 ++--
 .../phoenix/coprocessor/MetaDataProtocol.java  |  32 +-
 .../coprocessor/generated/MetaDataProtos.java  | 356 ++---
 .../coprocessor/generated/PTableProtos.java|  99 +++---
 .../coprocessor/generated/ServerCachingProtos.java | 122 +++
 .../org/apache/phoenix/index/IndexMaintainer.java  |  16 +-
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |   2 +-
 .../org/apache/phoenix/schema/DelegateTable.java   |  12 +-
 .../org/apache/phoenix/schema/MetaDataClient.java  |  25 +-
 .../java/org/apache/phoenix/schema/PTable.java |   6 +-
 .../java/org/apache/phoenix/schema/PTableImpl.java |  56 +++-
 .../java/org/apache/phoenix/util/MetaDataUtil.java |  46 ++-
 .../org/apache/phoenix/util/MetaDataUtilTest.java  | 115 +--
 phoenix-protocol/src/main/MetaDataService.proto|   4 +-
 phoenix-protocol/src/main/PTable.proto |   2 +-
 .../src/main/ServerCachingService.proto|   2 +-
 25 files changed, 739 insertions(+), 436 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
index 9e7aaa2..82a119f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
@@ -73,6 +73,8 @@ import org.junit.runners.Parameterized.Parameters;
 import com.google.common.base.Function;
 import com.google.common.collect.Lists;
 
+import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_USE_STATS_FOR_PARALLELIZATION;
+
 @RunWith(Parameterized.class)
 public class AlterTableWithViewsIT extends SplitSystemCatalogIT {
 
@@ -174,41 +176,53 @@ public class AlterTableWithViewsIT extends SplitSystemCatalogIT {
 conn.createStatement().execute(generateDDL("UPDATE_CACHE_FREQUENCY=2", ddlFormat));
 viewConn.createStatement().execute("CREATE VIEW " + viewOfTable1 + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 VARCHAR ) AS SELECT * FROM " + tableName);
 viewConn.createStatement().execute("CREATE VIEW " + viewOfTable2 + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 VARCHAR ) AS SELECT * FROM " + tableName);
-
-viewConn.createStatement().execute("ALTER VIEW " + viewOfTable2 + " SET UPDATE_CACHE_FREQUENCY = 1");
-
-PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
-PTable table = phoenixConn.getTable(new PTableKey(null, tableName));
 PName tenantId = isMultiTenant ? PNameFactory.newName(TENANT1) : null;
-assertFalse(table.isImmutableRows());
-assertEquals(2, table.getUpdateCacheFrequency());
+
+// Initially all property values should be the same for the base table and its views
+PTable table = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, tableName));
 PTable viewTable1 = viewConn.unwrap(PhoenixConnection.class).getTable(new PTableKey(tenantId, viewOfTable1));
+PTable viewTable2 = viewConn.unwrap(PhoenixConnection.class).getTable(new PTableKey(tenantId, viewOfTable2));
+assertFalse(table.isImmutableRows());
 assertFalse(viewTable1.isImmutableRows());
+assertFalse(viewTable2.isImmutableRows());
+assertEquals(2, table.getUpdateCacheFrequency());
 assertEquals(2, viewTable1.getUpdateCacheFrequency());
+assertEquals(2, viewTable2.getUpdateCacheFrequency());
+assertNull(table.useStatsForParallelization());
+assertNull(viewTable1.useStatsForParallelization());
+assertNull(viewTable2.useStatsForParallelization());
+
+// Alter a property value for one of the views
+viewConn.createStateme
[phoenix] 09/18: PHOENIX-5025 Tool to clean up orphan views (addendum)

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit bd4f52f576d11b779a82d89dd20354188adaf850
Author: Kadir 
AuthorDate: Thu Dec 13 01:53:38 2018 +

PHOENIX-5025 Tool to clean up orphan views (addendum)
---
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 23 +
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 24 --
 2 files changed, 28 insertions(+), 19 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
index f9a1785..ab78ecd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -27,9 +27,9 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
+import java.io.FileReader;
 import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Paths;
+import java.io.LineNumberReader;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -206,9 +206,13 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 }
 
 private void verifyLineCount(String fileName, long lineCount) throws IOException {
-if (Files.lines(Paths.get(fileName)).count() != lineCount)
-LOG.debug(Files.lines(Paths.get(fileName)).count() + " != " + lineCount);
-assertTrue(Files.lines(Paths.get(fileName)).count() == lineCount);
+LineNumberReader reader = new LineNumberReader(new FileReader(fileName));
+while (reader.readLine() != null) {
+}
+int count = reader.getLineNumber();
+if (count != lineCount)
+LOG.debug(count + " != " + lineCount);
+assertTrue(count == lineCount);
 }
 
 private void verifyCountQuery(Connection connection, String query, String schemaName, long count)
@@ -238,7 +242,6 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 }
 }
 
-
 private void verifyNoChildLink(Connection connection, String viewSchemaName) throws Exception {
 // Verify that there is no link in the system child link table
 verifyCountQuery(connection, countChildLinksQuery, viewSchemaName, 0);
@@ -264,6 +267,7 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 schemaName == null ? "IS NULL" : "= '" + schemaName + "'"));
 connection.commit();
 }
+
 @Test
 public void testDeleteBaseTableRows() throws Exception {
 String baseTableName = generateUniqueName();
@@ -438,7 +442,8 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 }
 }
 
-public static String[] getArgValues(boolean clean, boolean identify, boolean outputPath, boolean inputPath) {
+public static String[] getArgValues(boolean clean, boolean identify, boolean outputPath, boolean inputPath)
+throws InterruptedException {
 final List<String> args = Lists.newArrayList();
 if (outputPath) {
 args.add("-op");
@@ -454,8 +459,10 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 if (identify) {
 args.add("-i");
 }
+final long ageMs = 2000;
+Thread.sleep(ageMs);
 args.add("-a");
-args.add("0");
+args.add(Long.toString(ageMs));
 return args.toArray(new String[0]);
 }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index a8a30b6..713fb05 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -812,17 +812,6 @@ public class OrphanViewTool extends Configured implements Tool {
 } catch (IllegalStateException e) {
 printHelpAndExit(e.getMessage(), getOptions());
 }
-
-Properties props = new Properties();
-long scn = System.currentTimeMillis() - ageMs;
-props.setProperty("CurrentSCN", Long.toString(scn));
-connection = ConnectionUtil.getInputConnection(configuration);
-PhoenixConnection phoenixConnection = connection.unwrap(PhoenixConnection.class);
-
-if (clean) {
-// Take a snapshot of system tables to be modified
-createSnapshot(phoenixConnection, scn);
-}
 if (outputPath != null) {
 // Create files to log orphan views and links
 for (int i = VIE

[phoenix] 03/18: PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 1d9073c1a326f4317b0ee2960668c90f0234b003
Author: s.kadam 
AuthorDate: Thu Dec 6 00:11:07 2018 +

PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.
---
 phoenix-core/pom.xml   |   7 +
 .../org/apache/phoenix/tool/CanaryTestResult.java  |  86 
 .../org/apache/phoenix/tool/PhoenixCanaryTool.java | 477 +
 .../resources/phoenix-canary-file-sink.properties  |  17 +
 .../apache/phoenix/tool/PhoenixCanaryToolTest.java | 140 ++
 5 files changed, 727 insertions(+)

diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 97091b9..f8112fe 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -26,6 +26,7 @@
 
   <properties>
     <top.dir>${project.basedir}/..</top.dir>
+    <argparse4j.version>0.8.1</argparse4j.version>
   </properties>
 
   <dependencies>
@@ -228,6 +229,12 @@
       <artifactId>sqlline</artifactId>
     </dependency>
 
+    <dependency>
+      <groupId>net.sourceforge.argparse4j</groupId>
+      <artifactId>argparse4j</artifactId>
+      <version>${argparse4j.version}</version>
+    </dependency>
+
     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java b/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java
new file mode 100644
index 000..b72439c
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.tool;
+
+public class CanaryTestResult {
+
+private boolean isSuccessful;
+private long startTime;
+private long executionTime;
+private String message;
+private String testName;
+private String timestamp;
+private Object miscellaneous;
+
+public Object getMiscellaneous() {
+return miscellaneous;
+}
+
+public void setMiscellaneous(Object miscellaneous) {
+this.miscellaneous = miscellaneous;
+}
+
+public long getStartTime() {
+return startTime;
+}
+
+public void setStartTime(long startTime) {
+this.startTime = startTime;
+}
+
+public String getTimestamp() {
+return timestamp;
+}
+
+public void setTimestamp(String timestamp) {
+this.timestamp = timestamp;
+}
+
+public boolean isSuccessful() {
+return isSuccessful;
+}
+
+public void setSuccessful(boolean successful) {
+isSuccessful = successful;
+}
+
+public long getExecutionTime() {
+return executionTime;
+}
+
+public void setExecutionTime(long executionTime) {
+this.executionTime = executionTime;
+}
+
+public String getMessage() {
+return message;
+}
+
+public void setMessage(String message) {
+this.message = message;
+}
+
+public String getTestName() {
+return testName;
+}
+
+public void setTestName(String testName) {
+this.testName = testName;
+}
+
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
new file mode 100644
index 000..405f54f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
@@ -0,0 +1,477 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.tool;
+
+import co

[phoenix] branch 4.x-cdh5.16 created (now b129be9)

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a change to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at b129be9  PHOENIX-5059 Use the Datasource v2 api in the spark connector

This branch includes the following new commits:

 new de4e041  PHOENIX-4781 Create artifact jar so that shaded jar replaces it properly
 new c360f87  ScanningResultIterator metric RowsScanned not set. PHOENIX-5051
 new 1d9073c  PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.
 new 622fcf4  PHOENIX-4763: Changing a base table property value should be reflected in child views (if the property wasn't changed)
 new dff179b  PHOENIX-5025 Tool to clean up orphan views
 new 4de622a  PHOENIX-4983: Allow using a connection with a SCN set to write data to tables EXCEPT transactional tables or mutable tables with indexes or tables with ROW_TIMESTAMP column.
 new 4db9a6f  PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE timestamp check for all index
 new f530f94  PHOENIX-4983: Added missing apache license header.
 new bd4f52f  PHOENIX-5025 Tool to clean up orphan views (addendum)
 new 9c7ee72  PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 4.14.1 with hbase-1.x branch in secure setup
 new 81f8503  [PHOENIX-3623] Integrate Omid with Phoenix.
 new 3f17a89  PHOENIX-5074 DropTableWithViewsIT.testDropTableWithChildViews is flapping
 new 82172a1  PHOENIX-5074; fix compilation failure.
 new 5873214  PHOENIX-5084 Changes from Transactional Tables are not visible to query in different client.
 new 460da61  PHOENIX-4820 Optimize OrderBy for ClientAggregatePlan
 new 0e5a263  PHOENIX-5055 Split mutations batches probably affects correctness of index data
 new cea84e8  Changes for CDH 5.16.x
 new b129be9  PHOENIX-5059 Use the Datasource v2 api in the spark connector

The 18 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[phoenix] 10/18: PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 4.14.1 with hbase-1.x branch in secure setup

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 9c7ee727aacce0b5d428160ed167345f8febf369
Author: Monani Mihir 
AuthorDate: Fri Dec 14 10:50:17 2018 +

PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 4.14.1 with hbase-1.x branch in secure setup
---
 .../java/org/apache/phoenix/coprocessor/PhoenixAccessController.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 62c158c..ef26d2c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -406,7 +406,7 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
 final List<UserPermission> userPermissions = new ArrayList<UserPermission>();
 try (Connection connection = ConnectionFactory.createConnection(env.getConfiguration())) {
 // Merge permissions from all accessController coprocessors loaded in memory
-for (BaseMasterAndRegionObserver service : accessControllers) {
+for (BaseMasterAndRegionObserver service : getAccessControllers()) {
 // Use AccessControlClient API's if the accessController is an instance of org.apache.hadoop.hbase.security.access.AccessController
 if (service.getClass().getName().equals(org.apache.hadoop.hbase.security.access.AccessController.class.getName())) {
 userPermissions.addAll(AccessControlClient.getUserPermissions(connection, tableName.getNameAsString()));
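
The one-line change matters because the field may still be null when this hook runs, while the accessor can populate it on first use. A sketch of that lazy-initialization pattern; the body of getAccessControllers() is not part of this diff, so loadAccessControllers() below is a hypothetical helper:

private volatile List<BaseMasterAndRegionObserver> accessControllers;

private List<BaseMasterAndRegionObserver> getAccessControllers() throws IOException {
    if (accessControllers == null) {
        synchronized (this) {
            if (accessControllers == null) {
                accessControllers = loadAccessControllers(); // hypothetical helper
            }
        }
    }
    return accessControllers;
}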



[phoenix] 07/18: PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE timestamp check for all index

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 4db9a6fb614a9d39130fe764adf52d92fb1ec8f7
Author: Monani Mihir 
AuthorDate: Fri Dec 14 12:45:55 2018 +

PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE timestamp check for all index

Signed-off-by: Geoffrey Jacoby 
---
 .../coprocessor/MetaDataRegionObserver.java| 35 +-
 1 file changed, 21 insertions(+), 14 deletions(-)
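
Condensed, the rebuilder logic this commit wraps in a try/catch: once the rebuild scan reaches the latest upper-bound timestamp, the index is flipped back to ACTIVE; otherwise only the disable timestamp is advanced so clients observe progress. Identifiers mirror the diff below; this is a summary fragment, not the full method:

if (scanEndTime == latestUpperBoundTimestamp) {
    IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L,
            latestUpperBoundTimestamp);
    batchExecutedPerTableMap.remove(dataPTable.getName());
} else {
    // Increment timestamp so that clients see an updated disable timestamp.
    IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(),
            scanEndTime * signOfDisableTimeStamp, latestUpperBoundTimestamp);
}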

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 4968525..4045d47 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -512,20 +512,27 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 String indexTableFullName = SchemaUtil.getTableName(
 indexPTable.getSchemaName().getString(),
 indexPTable.getTableName().getString());
-if (scanEndTime == latestUpperBoundTimestamp) {
-IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L, latestUpperBoundTimestamp);
-batchExecutedPerTableMap.remove(dataPTable.getName());
-LOG.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
-} else {
-// Increment timestamp so that client sees updated disable timestamp
-IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(), scanEndTime * signOfDisableTimeStamp, latestUpperBoundTimestamp);
-Long noOfBatches = batchExecutedPerTableMap.get(dataPTable.getName());
-if (noOfBatches == null) {
-noOfBatches = 0l;
-}
-batchExecutedPerTableMap.put(dataPTable.getName(), ++noOfBatches);
-LOG.info("During Round-robin build: Successfully updated index disabled timestamp  for "
-+ indexTableFullName + " to " + scanEndTime);
+try {
+if (scanEndTime == latestUpperBoundTimestamp) {
+IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L,
+latestUpperBoundTimestamp);
+batchExecutedPerTableMap.remove(dataPTable.getName());
+LOG.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
+} else {
+// Increment timestamp so that client sees updated disable timestamp
+IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(),
+scanEndTime * signOfDisableTimeStamp, latestUpperBoundTimestamp);
+Long noOfBatches = batchExecutedPerTableMap.get(dataPTable.getName());
+if (noOfBatches == null) {
+noOfBatches = 0l;
+}
+

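As an aside, the null-check-then-increment bookkeeping at the end of the hunk
is the pre-Java-8 idiom; on Java 8+ the same counter update collapses to a
single Map.merge call. A self-contained sketch (the map and key are stand-ins
for the rebuilder's per-table state, not the actual Phoenix fields):

    import java.util.HashMap;
    import java.util.Map;

    public class BatchCounterDemo {
        public static void main(String[] args) {
            Map<String, Long> batchExecutedPerTable = new HashMap<>();
            // merge() seeds the counter on first use and sums afterwards
            batchExecutedPerTable.merge("MY_TABLE", 1L, Long::sum); // -> 1
            batchExecutedPerTable.merge("MY_TABLE", 1L, Long::sum); // -> 2
            System.out.println(batchExecutedPerTable.get("MY_TABLE")); // prints 2
        }
    }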
[phoenix] 06/18: PHOENIX-4983: Allow using a connection with a SCN set to write data to tables EXCEPT transactional tables or mutable tables with indexes or tables with ROW_TIMESTAMP column.

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 4de622ab30d3f5aeb755ffaf786ec9ec4cdd3ba1
Author: s.kadam 
AuthorDate: Mon Dec 10 22:40:17 2018 +

PHOENIX-4983: Allow using a connection with a SCN set to write data to 
tables EXCEPT transactional tables or mutable tables with indexes or tables 
with ROW_TIMESTAMP column.
---
 .../apache/phoenix/end2end/UpsertWithSCNIT.java| 139 +
 .../org/apache/phoenix/compile/UpsertCompiler.java |  23 +++-
 .../apache/phoenix/exception/SQLExceptionCode.java |  13 +-
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |   2 +-
 4 files changed, 172 insertions(+), 5 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
new file mode 100644
index 000..6f231ff
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertWithSCNIT.java
@@ -0,0 +1,139 @@
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+
+public class UpsertWithSCNIT extends ParallelStatsDisabledIT {
+
+@Rule
+public final ExpectedException exception = ExpectedException.none();
+Properties props = null;
+PreparedStatement prep = null;
+String tableName =null;
+
+private void helpTestUpserWithSCNIT(boolean rowColumn, boolean txTable,
+boolean mutable, boolean local, 
boolean global)
+throws SQLException {
+
+tableName = generateUniqueName();
+String indx;
+String createTable = "CREATE TABLE "+tableName+" ("
++ (rowColumn ? "CREATED_DATE DATE NOT NULL, ":"")
++ "METRIC_ID CHAR(15) NOT NULL,METRIC_VALUE VARCHAR(50) 
CONSTRAINT PK PRIMARY KEY("
++ (rowColumn? "CREATED_DATE ROW_TIMESTAMP, ":"") + 
"METRIC_ID)) "
++ (mutable? "IMMUTABLE_ROWS=false":"" )
++ (txTable ? 
"TRANSACTION_PROVIDER='TEPHRA',TRANSACTIONAL=true":"");
+props = new Properties();
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute(createTable);
+
+if(local || global ){
+indx = "CREATE "+ (local? "LOCAL " : "") + "INDEX 
"+tableName+"_idx ON " +
+""+tableName+" (METRIC_VALUE)";
+conn.createStatement().execute(indx);
+}
+
+props.setProperty("CurrentSCN", 
Long.toString(System.currentTimeMillis()));
+conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(true);
+String upsert = "UPSERT INTO "+tableName+" (METRIC_ID, METRIC_VALUE) 
VALUES (?,?)";
+prep = conn.prepareStatement(upsert);
+prep.setString(1,"abc");
+prep.setString(2,"This is the first comment!");
+}
+
+@Test // See https://issues.apache.org/jira/browse/PHOENIX-4983
+public void testUpsertOnSCNSetTxnTable() throws SQLException {
+
+helpTestUpserWithSCNIT(false, true, false, false, false);
+exception.expect(SQLException.class);
+exception.expectMessage(containsString(String.valueOf(
+SQLExceptionCode
+.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE
+.getErrorCode())));
+prep.executeUpdate();
+}
+
+@Test
+public void testUpsertOnSCNSetMutTableWithoutIdx() throws Exception {
+
+helpTestUpserWithSCNIT(false, false, true, false, false);
+prep.executeUpdate();
+props = new Properties();
+Connection conn = DriverManager.getConnection(getUrl(),props);
+ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM 
"+tableName);
+assertTrue(rs.next());
+assertEquals("abc", rs.getString(1));
+assertEquals("This is the first comment!", rs.getString(2));
+assertFalse(rs.next());
+}
+
+@Test
+public void test

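For reference, the client-side pattern the new IT exercises looks roughly as
follows: set CurrentSCN in the connection properties, then upsert into a table
that is not transactional, has no ROW_TIMESTAMP column, and no index over
mutable rows. A minimal sketch; the JDBC URL and table are placeholders:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class UpsertWithScnDemo {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // every mutation on this connection is written at this timestamp
            props.setProperty("CurrentSCN", Long.toString(System.currentTimeMillis()));
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
                conn.setAutoCommit(true);
                conn.createStatement().execute(
                        "UPSERT INTO METRICS (METRIC_ID, METRIC_VALUE) VALUES ('abc', 'v1')");
            }
        }
    }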
[phoenix] 14/18: PHOENIX-5084 Changes from Transactional Tables are not visible to query in different client.

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-cdh5.16
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 58732144d28e1af4143b6554c0f01f7e1e0f1669
Author: Lars Hofhansl 
AuthorDate: Wed Jan 2 08:52:52 2019 +

PHOENIX-5084 Changes from Transactional Tables are not visible to query in 
different client.
---
 .../org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java | 12 
 1 file changed, 12 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 3ff62e2..61ba0fc 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -718,6 +718,7 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 @Override
 public ResultSet getColumns(String catalog, String schemaPattern, String 
tableNamePattern, String columnNamePattern)
 throws SQLException {
+try {
 boolean isTenantSpecificConnection = connection.getTenantId() != null;
 List tuples = Lists.newArrayListWithExpectedSize(10);
 ResultSet rs = getTables(catalog, schemaPattern, tableNamePattern, 
null);
@@ -893,6 +894,11 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 }
 }
 return new PhoenixResultSet(new MaterializedResultIterator(tuples), 
GET_COLUMNS_ROW_PROJECTOR, new StatementContext(new 
PhoenixStatement(connection), false));
+} finally {
+if (connection.getAutoCommit()) {
+connection.commit();
+}
+}
 }
 
 @Override
@@ -1142,6 +1148,7 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 if (tableName == null || tableName.length() == 0) {
 return emptyResultSet;
 }
+try {
 List tuples = Lists.newArrayListWithExpectedSize(10);
 ResultSet rs = getTables(catalog, schemaName, tableName, null);
 while (rs.next()) {
@@ -1219,6 +1226,11 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 return new PhoenixResultSet(new MaterializedResultIterator(tuples),
 GET_PRIMARY_KEYS_ROW_PROJECTOR,
 new StatementContext(new PhoenixStatement(connection), false));
+} finally {
+if (connection.getAutoCommit()) {
+connection.commit();
+}
+}
 }
 
 @Override



[phoenix] branch 4.14-cdh5.14 updated (7e43ebb -> 98b689e)

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a change to branch 4.14-cdh5.14
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 7e43ebb  PHOENIX-5056 Ignore failing IT
 new 0de7307  PHOENIX-4872: BulkLoad has bug when loading on 
single-cell-array-with-offsets table.
 new 79ff982  modify index state based on client version to support old 
clients
 new 481fd38  PHOENIX-5126 RegionScanner leak leading to store files not 
getting cleared
 new 94379b7  PHOENIX-4900 Modify MAX_MUTATION_SIZE_EXCEEDED and 
MAX_MUTATION_SIZE_BYTES_EXCEEDED exception message to recommend turning 
autocommit on for deletes
 new 2302442  PHOENIX-5207 Create index if not exists fails incorrectly if 
table has 'maxIndexesPerTable' indexes already
 new e0a8b87  PHOENIX-5122: PHOENIX-4322 breaks client backward 
compatibility
 new 523bd30  PHOENIX-5217 Incorrect result for COUNT DISTINCT limit
 new 5c6955d  PHOENIX-5246: PhoenixAccessControllers.getAccessControllers() 
method is not correctly implementing the double-checked locking
 new cae2069  PHOENIX-5173: LIKE and ILIKE statements return empty result 
list for search without wildcard
 new de1f9b4  PHOENIX-5008: CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
 new 3be996d  PHOENIX-5008 (Addendum): CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
 new 0aa0a7e  PHOENIX-5005 Server-side delete / upsert-select potentially 
blocked after a split
 new 2b0d68d  PHOENIX-4750 Resolve server customizers and provide them to 
Avatica
 new 6d6ccea  PHOENIX-4755 Provide an option to plugin custom avatica 
server config in PQS
 new bac60e3  PHOENIX-3991 ROW_TIMESTAMP on TIMESTAMP column type throws 
ArrayOutOfBound when upserting without providing a value.
 new 151f816  PHOENIX-4834 PhoenixMetricsLog interface methods should not 
depend on specific logger
 new adde363  PHOENIX-4835 LoggingPhoenixConnection should log metrics upon 
connection close
 new 11ebb0f  PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface 
for query level metrics logging
 new 685d9a0  PHOENIX-4854 Make LoggingPhoenixResultSet idempotent when 
logging metrics
 new 0ccb110  PHOENIX-4864 Fix NullPointerException while Logging some DDL 
Statements
 new db087e9  PHOENIX-4870 LoggingPhoenixConnection should log metrics when 
AutoCommit is set to True.
 new d1e234f  PHOENIX-4989 Include disruptor jar in shaded dependency
 new 06a94be  PHOENIX-4781 Create artifact jar so that shaded jar replaces 
it properly
 new 11375b9  PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE 
timestamp check for all index
 new 017da22  PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 
4.14.1 with hbase-1.x branch in secure setup
 new 9f0616a  PHOENIX-5111: Null Pointer exception fix in index tool due to 
outputpath being null when direct option is supplied
 new 6b15799  PHOENIX-5094 increment pending disable count for index when 
rebuild starts
 new 04726ff  PHOENIX-4993 close cache connections when region server is 
going down
 new 68d956b  Add tenantId param to IndexTool
 new 7681bc1  PHOENIX-5080 Index becomes Active during Partial Index 
Rebuilder if Index Failure happens
 new dbc308e  PHOENIX-5025 Tool to clean up orphan views
 new 6dcf219  PHOENIX-5025 Tool to clean up orphan views (addendum)
 new 88e2ccf  PHOENIX-5247 DROP TABLE and DROP VIEW commands fail to drop 
second or higher level child views
 new 64437e8  PHOENIX-5137 check region close before commiting a batch for 
index rebuild
 new 5a66d58  PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.
 new 9f072eb  PHOENIX-5172: Harden the PQS canary synth test tool with 
retry mechanism and more logging
 new 654bb29  PHOENIX-5188 - IndexedKeyValue should populate KeyValue fields
 new 7c7ade4  PHOENIX-5124 PropertyPolicyProvider should not evaluate 
default hbase config properties
 new 4a32d77  PHOENIX-4822 Ensure the provided timezone is used client-side 
(Jaanai Zhang)
 new 51815e6  PHOENIX-4822 Fixed Spelling.
 new 06b7b9d  PHOENIX-5194 Thread Cache is not update for Index retries in 
for MutationState#send()#doMutation()
 new 9cb89e2  PHOENIX-5018 Index mutations created by UPSERT SELECT will 
have wrong timestamps
 new f7d3019  PHOENIX-5184: HBase and Phoenix connection leaks in Indexing 
code path, OrphanViewTool and PhoenixConfigurationUtil
 new 8ba7382  PhoenixResultSet#next() closes the result set if scanner 
returns null
 new 01e0e31  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
 new 736e2e4  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE 
(Addendum)
 new 1f5bffa  Add missing license
 new 8e636a1  Set version to 4.14.2-cdh5.14
 new c18da31  PHOENIX-5195

[phoenix] 01/04: PHOENIX-4296: reverse scan in ChunkedResultIterator

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit c98c89f2fbbc34817cef1e07d030c65e84cc1d66
Author: chfeng 
AuthorDate: Thu May 16 11:41:41 2019 +0100

PHOENIX-4296: reverse scan in ChunkedResultIterator
---
 .../phoenix/iterate/ChunkedResultIterator.java | 13 +++-
 .../phoenix/iterate/ChunkedResultIteratorTest.java | 73 ++
 2 files changed, 83 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index acb6c04..1aab2d5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -58,6 +58,7 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 
 private final ParallelIteratorFactory delegateIteratorFactory;
 private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
+private ImmutableBytesWritable prevLastKey = new ImmutableBytesWritable();
 private final StatementContext context;
 private final TableRef tableRef;
 private final long chunkSize;
@@ -96,8 +97,9 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 }
 }
 
-private ChunkedResultIterator(ParallelIteratorFactory 
delegateIteratorFactory, MutationState mutationState,
-   StatementContext context, TableRef tableRef, Scan scan, long 
chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
+private ChunkedResultIterator(ParallelIteratorFactory 
delegateIteratorFactory,
+MutationState mutationState, StatementContext context, TableRef 
tableRef, Scan scan,
+long chunkSize, ResultIterator scanner, QueryPlan plan) throws 
SQLException {
 this.delegateIteratorFactory = delegateIteratorFactory;
 this.context = context;
 this.tableRef = tableRef;
@@ -138,8 +140,12 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 if (resultIterator.peek() == null && lastKey != null) {
 resultIterator.close();
 scan = ScanUtil.newScan(scan);
-if(ScanUtil.isLocalIndex(scan)) {
+if (ScanUtil.isLocalIndex(scan)) {
 scan.setAttribute(SCAN_START_ROW_SUFFIX, 
ByteUtil.copyKeyBytesIfNecessary(lastKey));
+} else if (ScanUtil.isReversed(scan)) {
+// lastKey is the last row the previous iterator met but did not return.
+// For a reverse scan, use prevLastKey as the new stopRow.
+scan.setStopRow(ByteUtil.copyKeyBytesIfNecessary(prevLastKey));
 } else {
 scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
 }
@@ -212,6 +218,7 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 byte[] currentKey = lastKey.get();
 int offset = lastKey.getOffset();
 int length = lastKey.getLength();
+prevLastKey.set(lastKey.copyBytes());
 newTuple.getKey(lastKey);
 
 return Bytes.compareTo(currentKey, offset, length, lastKey.get(), 
lastKey.getOffset(), lastKey.getLength()) != 0;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
new file mode 100644
index 000..18402f0
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.iterate;
+
+import static org.apache.phoenix.util.TestUtil.PHOENIX_JDBC_URL;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.util.List;
+import j

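At the SQL level the bug showed up on descending scans that span more than one
chunk: a query like the one below runs as a reverse scan, and before this
patch the continuation chunk was issued with a start key where a stop key was
needed. A hedged sketch; the URL and table are placeholders, and k is assumed
to be the primary key:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ReverseScanDemo {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement();
                 // ORDER BY the primary key DESC forces a reverse scan
                 ResultSet rs = stmt.executeQuery("SELECT k FROM T ORDER BY k DESC")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }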
[phoenix] 04/04: PHOENIX-5112 Simplify QueryPlan selection in Phoenix.

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 68d1a04f1c80212a6fe6dd9574de9c34ad39b779
Author: Lars Hofhansl 
AuthorDate: Sat May 25 02:55:09 2019 +0100

PHOENIX-5112 Simplify QueryPlan selection in Phoenix.
---
 .../org/apache/phoenix/optimize/QueryOptimizer.java| 18 --
 1 file changed, 18 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 43a5950..4f0dfeb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -324,24 +324,6 @@ public class QueryOptimizer {
 
 QueryPlan plan = compiler.compile();
 
-boolean optimizedSort =
-plan.getOrderBy().getOrderByExpressions().isEmpty()
-&& 
!dataPlan.getOrderBy().getOrderByExpressions().isEmpty()
-|| plan.getGroupBy().isOrderPreserving()
-&& 
!dataPlan.getGroupBy().isOrderPreserving();
-
-// If query doesn't have where clause, or the planner didn't 
add any (bound) scan ranges, and some of
-// columns to project/filter are missing in the index then we 
need to get missing columns from main table
-// for each row in local index. It's like full scan of both 
local index and data table which is inefficient.
-// Then we don't use the index. If all the columns to project 
are present in the index 
-// then we can use the index even the query doesn't have where 
clause.
-// We'll use the index anyway if it allowed us to avoid a sort 
operation.
-if (index.getIndexType() == IndexType.LOCAL
-&& (indexSelect.getWhere() == null
-|| 
plan.getContext().getScanRanges().getBoundRanges().size() == 1)
-&& !plan.getContext().getDataColumns().isEmpty() && 
!optimizedSort) {
-return null;
-}
 indexTableRef = plan.getTableRef();
 indexTable = indexTableRef.getTable();
 indexState = indexTable.getIndexState();


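With this heuristic gone, local-index selection rides on the optimizer's
remaining rules alone; when a particular plan is still wanted, Phoenix's index
hint can pin it explicitly. A small sketch (table and index names are
placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class IndexHintDemo {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement();
                 // the INDEX hint forces the named index for this query
                 ResultSet rs = stmt.executeQuery(
                         "SELECT /*+ INDEX(MY_TABLE MY_LOCAL_IDX) */ v1 FROM MY_TABLE WHERE v2 = 'x'")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }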

[phoenix] 02/04: PHOENIX-5291 Ensure that Phoenix coprocessor close all scanners.

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit a5f1b5c26b9d15c68244bc62faa57b70361d209b
Author: Lars Hofhansl 
AuthorDate: Thu May 23 06:40:34 2019 +0100

PHOENIX-5291 Ensure that Phoenix coprocessor close all scanners.
---
 .../coprocessor/UngroupedAggregateRegionObserver.java   | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index dc7567b..dc61a98 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -1175,7 +1175,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 long rowCount = 0; // in case of async, we report 0 as number of rows 
updated
 StatisticsCollectionRunTracker statsRunTracker =
 StatisticsCollectionRunTracker.getInstance(config);
-boolean runUpdateStats = 
statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo(),scan.getFamilyMap().keySet());
+final boolean runUpdateStats = 
statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo(),scan.getFamilyMap().keySet());
 if (runUpdateStats) {
 if (!async) {
 rowCount = callable.call();
@@ -1204,8 +1204,11 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 
 @Override
 public void close() throws IOException {
-// No-op because we want to manage closing of the inner 
scanner ourselves.
-// This happens inside StatsCollectionCallable.
+// If we ran/scheduled StatsCollectionCallable the delegate
+// scanner is closed there. Otherwise close it here.
+if (!runUpdateStats) {
+super.close();
+}
 }
 
 @Override
@@ -1442,6 +1445,14 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 + fullTableName);
 Scan scan = new Scan();
 scan.setMaxVersions();
+
+// close the passed scanner since we are 
returning a brand-new one
+try {
+if (s != null) {
+s.close();
+}
+} catch (IOException ignore) {}
+
 return new StoreScanner(store, 
store.getScanInfo(), scan, scanners,
 ScanType.COMPACT_RETAIN_DELETES, 
store.getSmallestReadPoint(),
 HConstants.OLDEST_TIMESTAMP);

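Both hunks apply the same ownership rule: a scanner that will not be handed to
anyone else must be closed by whoever holds it, and a scanner being replaced
is closed best-effort before the replacement is returned. A generic rendering
of that rule in plain Java (stand-in types, not the HBase interfaces):

    import java.io.Closeable;
    import java.io.IOException;

    public class OwnershipCloseDemo {
        interface Scanner extends Closeable { }

        // close the scanner we are about to replace, swallowing close errors
        static Scanner replace(Scanner old) {
            if (old != null) {
                try { old.close(); } catch (IOException ignore) { }
            }
            return new Scanner() { @Override public void close() { } };
        }

        public static void main(String[] args) {
            Scanner s = replace(null);
            s = replace(s); // the first scanner is closed, not leaked
            System.out.println("replaced without leaking");
        }
    }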


[phoenix] branch 4.x-HBase-1.2 updated (34ffbb9 -> 68d1a04)

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a change to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 34ffbb9  PHOENIX-5231 Configurable Stats Cache
 new c98c89f  PHOENIX-4296: reverse scan in ChunkedResultIterator
 new a5f1b5c  PHOENIX-5291 Ensure that Phoenix coprocessor close all 
scanners.
 new 42511fb  PHOENIX-5297 POM cleanup and de-duplication
 new 68d1a04  PHOENIX-5112 Simplify QueryPlan selection in Phoenix.

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 phoenix-core/pom.xml   | 14 -
 .../UngroupedAggregateRegionObserver.java  | 17 -
 .../phoenix/iterate/ChunkedResultIterator.java | 13 +++-
 .../apache/phoenix/optimize/QueryOptimizer.java| 18 --
 .../phoenix/iterate/ChunkedResultIteratorTest.java | 73 ++
 phoenix-pherf/pom.xml  |  7 ---
 pom.xml|  8 +--
 7 files changed, 101 insertions(+), 49 deletions(-)
 create mode 100644 
phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java



[phoenix] 03/04: PHOENIX-5297 POM cleanup and de-duplication

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 42511fb880151962beb0005d2f514ac5c48acf33
Author: Josh Elser 
AuthorDate: Fri May 24 17:02:11 2019 +0100

PHOENIX-5297 POM cleanup and de-duplication

Signed-off-by: Geoffrey Jacoby 
---
 phoenix-core/pom.xml  | 14 --
 phoenix-pherf/pom.xml |  7 ---
 pom.xml   |  8 
 3 files changed, 4 insertions(+), 25 deletions(-)

diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 3aab0ed..99cab92 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -10,20 +10,6 @@
   <name>Phoenix Core</name>
   <description>Core Phoenix codebase</description>
 
-  <licenses>
-    <license>
-      <name>The Apache Software License, Version 2.0</name>
-      <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
-      <distribution>repo</distribution>
-    </license>
-  </licenses>
-
-  <organization>
-    <name>Apache Software Foundation</name>
-    <url>http://www.apache.org</url>
-  </organization>
-
   <properties>
     <top.dir>${project.basedir}/..</top.dir>
     <argparse4j.version>0.8.1</argparse4j.version>
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 8640b3a..6463c8f 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -27,13 +27,6 @@
     <shaded.package>org.apache.phoenix.shaded</shaded.package>
   </properties>
 
-  <repositories>
-    <repository>
-      <id>apache release</id>
-      <url>https://repository.apache.org/content/repositories/releases/</url>
-    </repository>
-  </repositories>
-
   <dependencies>
     <dependency>
       <groupId>org.apache.phoenix</groupId>
diff --git a/pom.xml b/pom.xml
index 83119ce..4ed9b89 100644
--- a/pom.xml
+++ b/pom.xml
@@ -11,7 +11,7 @@
   <licenses>
     <license>
       <name>The Apache Software License, Version 2.0</name>
-      <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+      <url>https://www.apache.org/licenses/LICENSE-2.0.txt</url>
       <distribution>repo</distribution>
     </license>
   </licenses>
@@ -19,7 +19,7 @@
 
   <organization>
     <name>Apache Software Foundation</name>
-    <url>http://www.apache.org</url>
+    <url>https://www.apache.org</url>
   </organization>
 
@@ -45,7 +45,7 @@
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/phoenix.git</connection>
+    <connection>scm:git:https://git-wip-us.apache.org/repos/asf/phoenix.git</connection>
     <url>https://git-wip-us.apache.org/repos/asf/phoenix.git</url>
     <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/phoenix.git</developerConnection>
   </scm>
@@ -409,7 +409,7 @@
         <configuration>
           <quiet>true</quiet>
           <links>
-            <link>http://hbase.apache.org/apidocs/</link>
+            <link>https://hbase.apache.org/apidocs/</link>
           </links>
 
 



[phoenix] 01/02: PHOENIX-4296: reverse scan in ChunkedResultIterator

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 58083d70e1d774aa88283fb16945eb546c0e4f27
Author: chfeng 
AuthorDate: Thu May 16 11:41:41 2019 +0100

PHOENIX-4296: reverse scan in ChunkedResultIterator
---
 .../phoenix/iterate/ChunkedResultIterator.java | 13 +++-
 .../phoenix/iterate/ChunkedResultIteratorTest.java | 73 ++
 2 files changed, 83 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index acb6c04..1aab2d5 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -58,6 +58,7 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 
 private final ParallelIteratorFactory delegateIteratorFactory;
 private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
+private ImmutableBytesWritable prevLastKey = new ImmutableBytesWritable();
 private final StatementContext context;
 private final TableRef tableRef;
 private final long chunkSize;
@@ -96,8 +97,9 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 }
 }
 
-private ChunkedResultIterator(ParallelIteratorFactory 
delegateIteratorFactory, MutationState mutationState,
-   StatementContext context, TableRef tableRef, Scan scan, long 
chunkSize, ResultIterator scanner, QueryPlan plan) throws SQLException {
+private ChunkedResultIterator(ParallelIteratorFactory 
delegateIteratorFactory,
+MutationState mutationState, StatementContext context, TableRef 
tableRef, Scan scan,
+long chunkSize, ResultIterator scanner, QueryPlan plan) throws 
SQLException {
 this.delegateIteratorFactory = delegateIteratorFactory;
 this.context = context;
 this.tableRef = tableRef;
@@ -138,8 +140,12 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 if (resultIterator.peek() == null && lastKey != null) {
 resultIterator.close();
 scan = ScanUtil.newScan(scan);
-if(ScanUtil.isLocalIndex(scan)) {
+if (ScanUtil.isLocalIndex(scan)) {
 scan.setAttribute(SCAN_START_ROW_SUFFIX, 
ByteUtil.copyKeyBytesIfNecessary(lastKey));
+} else if (ScanUtil.isReversed(scan)) {
+// lastKey is the last row the previous iterator met but did not return.
+// For a reverse scan, use prevLastKey as the new stopRow.
+scan.setStopRow(ByteUtil.copyKeyBytesIfNecessary(prevLastKey));
 } else {
 scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
 }
@@ -212,6 +218,7 @@ public class ChunkedResultIterator implements 
PeekingResultIterator {
 byte[] currentKey = lastKey.get();
 int offset = lastKey.getOffset();
 int length = lastKey.getLength();
+prevLastKey.set(lastKey.copyBytes());
 newTuple.getKey(lastKey);
 
 return Bytes.compareTo(currentKey, offset, length, lastKey.get(), 
lastKey.getOffset(), lastKey.getLength()) != 0;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
new file mode 100644
index 000..18402f0
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.iterate;
+
+import static org.apache.phoenix.util.TestUtil.PHOENIX_JDBC_URL;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.util.List;
+import j

[phoenix] 02/02: PHOENIX-5291 Ensure that Phoenix coprocessor close all scanners.

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 6349f245e29ca54d773026a5563c43a2ab9e8264
Author: Lars Hofhansl 
AuthorDate: Thu May 23 06:40:34 2019 +0100

PHOENIX-5291 Ensure that Phoenix coprocessor close all scanners.
---
 .../coprocessor/UngroupedAggregateRegionObserver.java   | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index f0ce5b2..72ee4a3 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -1158,7 +1158,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 long rowCount = 0; // in case of async, we report 0 as number of rows 
updated
 StatisticsCollectionRunTracker statsRunTracker =
 StatisticsCollectionRunTracker.getInstance(config);
-boolean runUpdateStats = 
statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo(),scan.getFamilyMap().keySet());
+final boolean runUpdateStats = 
statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo(),scan.getFamilyMap().keySet());
 if (runUpdateStats) {
 if (!async) {
 rowCount = callable.call();
@@ -1187,8 +1187,11 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 
 @Override
 public void close() throws IOException {
-// No-op because we want to manage closing of the inner 
scanner ourselves.
-// This happens inside StatsCollectionCallable.
+// If we ran/scheduled StatsCollectionCallable the delegate
+// scanner is closed there. Otherwise close it here.
+if (!runUpdateStats) {
+super.close();
+}
 }
 
 @Override
@@ -1425,6 +1428,14 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 + fullTableName);
 Scan scan = new Scan();
 scan.setMaxVersions();
+
+// close the passed scanner since we are 
returning a brand-new one
+try {
+if (s != null) {
+s.close();
+}
+} catch (IOException ignore) {}
+
 return new StoreScanner(store, 
store.getScanInfo(), scan, scanners,
 ScanType.COMPACT_RETAIN_DELETES, 
store.getSmallestReadPoint(),
 HConstants.OLDEST_TIMESTAMP);



[phoenix] branch 4.14-HBase-1.2 updated (0a7e93d -> 6349f24)

2019-05-28 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a change to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 0a7e93d  PHOENIX-5055 Split mutations batches probably affects 
correctness of index data
 new 58083d7  PHOENIX-4296: reverse scan in ChunkedResultIterator
 new 6349f24  PHOENIX-5291 Ensure that Phoenix coprocessor close all 
scanners.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../UngroupedAggregateRegionObserver.java  | 17 -
 .../phoenix/iterate/ChunkedResultIterator.java | 13 +++-
 .../phoenix/iterate/ChunkedResultIteratorTest.java | 73 ++
 3 files changed, 97 insertions(+), 6 deletions(-)
 create mode 100644 
phoenix-core/src/test/java/org/apache/phoenix/iterate/ChunkedResultIteratorTest.java



[phoenix] 05/12: PHOENIX-5235: Update SQLline version to the latest

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit d55fc511616c1b2d33f79b8671549657e2c3e6de
Author: s.kadam 
AuthorDate: Fri Apr 19 23:05:27 2019 +0100

PHOENIX-5235: Update SQLline version to the latest
---
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index dd4dda9..a356365 100644
--- a/pom.xml
+++ b/pom.xml
@@ -98,7 +98,7 @@
 ${cdh.commons-lang3.version}
 
${cdh.commons-logging.version}
 1.0
-    <sqlline.version>1.2.0</sqlline.version>
+    <sqlline.version>1.7.0</sqlline.version>
 13.0.1
 ${cdh.flume-ng.version}
 ${cdh.kafka.version}



[phoenix] 07/12: PHOENIX-5181 support Math sin/cos/tan functions

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 2f225e3680977d3b4e0ace578d6ccacb69994718
Author: Xinyi Yan 
AuthorDate: Thu Mar 7 18:48:57 2019 +

PHOENIX-5181 support Math sin/cos/tan functions
---
 .../phoenix/end2end/MathTrigFunctionEnd2EndIT.java |  94 +++
 .../apache/phoenix/expression/ExpressionType.java  |   3 +
 .../phoenix/expression/function/CosFunction.java   |  56 +++
 .../phoenix/expression/function/SinFunction.java   |  56 +++
 .../phoenix/expression/function/TanFunction.java   |  56 +++
 .../phoenix/expression/MathTrigFunctionTest.java   | 179 +
 6 files changed, 444 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MathTrigFunctionEnd2EndIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MathTrigFunctionEnd2EndIT.java
new file mode 100644
index 000..b4f2b4f
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MathTrigFunctionEnd2EndIT.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.closeStmtAndConn;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+import org.apache.phoenix.expression.function.CosFunction;
+import org.apache.phoenix.expression.function.SinFunction;
+import org.apache.phoenix.expression.function.TanFunction;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * End to end tests for
+ * {@link org.apache.phoenix.expression.function.CosFunction}
+ * {@link org.apache.phoenix.expression.function.SinFunction}
+ * {@link org.apache.phoenix.expression.function.TanFunction}
+ */
+
+public class MathTrigFunctionEnd2EndIT extends ParallelStatsDisabledIT {
+
+private static final String KEY = "key";
+private String tableName;
+
+@Before
+public void initTable() throws Exception {
+Connection conn = null;
+PreparedStatement stmt = null;
+tableName = generateUniqueName();
+
+try {
+conn = DriverManager.getConnection(getUrl());
+String ddl;
+ddl =
+"CREATE TABLE " + tableName + " (k VARCHAR NOT NULL 
PRIMARY KEY, doub DOUBLE)";
+conn.createStatement().execute(ddl);
+conn.commit();
+} finally {
+closeStmtAndConn(stmt, conn);
+}
+}
+
+private void updateTableSpec(Connection conn, double data, String 
tableName) throws Exception {
+PreparedStatement stmt =
+conn.prepareStatement("UPSERT INTO " + tableName + " VALUES 
(?, ?)");
+stmt.setString(1, KEY);
+stmt.setDouble(2, data);
+stmt.executeUpdate();
+conn.commit();
+}
+
+private void testNumberSpec(Connection conn, double data, String 
tableName) throws Exception {
+updateTableSpec(conn, data, tableName);
+ResultSet rs =
+conn.createStatement().executeQuery(
+"SELECT SIN(doub),COS(doub),TAN(doub) FROM " + 
tableName);
+assertTrue(rs.next());
+Double d = Double.valueOf(data);
+assertTrue(twoDoubleEquals(rs.getDouble(1), Math.sin(data)));
+assertTrue(twoDoubleEquals(rs.getDouble(2), Math.cos(data)));
+assertTrue(twoDoubleEquals(rs.getDouble(3), Math.tan(data)));
+
+assertTrue(!rs.next());
+}
+
+@Test
+public void test() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+for (double d : new double[] { 0.0, 1.0, -1.0, 123.1234, -123.1234 }) {
+testNumberSpec(conn, d, tableName);
+}
+}
+}
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/ExpressionType.java
index a18928c..8f36e23 100644
--- 
a/phoenix-core/src/main/java/org/apa

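Once registered in ExpressionType, the three functions are callable straight
from SQL. A minimal client-side cross-check against java.lang.Math; the URL
and table are placeholders, and as in the IT a tolerance (rather than ==) is
advisable when comparing doubles:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class TrigFunctionDemo {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 ResultSet rs = conn.createStatement().executeQuery(
                         "SELECT doub, SIN(doub) FROM MY_TABLE")) {
                while (rs.next()) {
                    double d = rs.getDouble(1);
                    // server-side SIN should agree with Math.sin within tolerance
                    System.out.println(Math.abs(rs.getDouble(2) - Math.sin(d)) < 1e-9);
                }
            }
        }
    }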
[phoenix] 01/12: PHOENIX-5246: PhoenixAccessControllers.getAccessControllers() method is not correctly implementing the double-checked locking

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit f2d96590fa9be307f93ec56538c0ef1301d0ec9b
Author: s.kadam 
AuthorDate: Fri Apr 19 21:10:27 2019 +0100

PHOENIX-5246: PhoenixAccessControllers.getAccessControllers() method is not 
correctly implementing the double-checked locking
---
 .../java/org/apache/phoenix/coprocessor/PhoenixAccessController.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index d07f4f7..1303363 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -77,7 +77,7 @@ import com.google.protobuf.RpcController;
 public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
 
 private PhoenixMetaDataControllerEnvironment env;
-private ArrayList accessControllers;
+private volatile ArrayList accessControllers;
 private boolean accessCheckEnabled;
 private UserProvider userProvider;
 public static final Log LOG = 
LogFactory.getLog(PhoenixAccessController.class);

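The volatile keyword is what makes the double-checked locking in
getAccessControllers() sound: without it, a second thread may observe a
non-null reference before the list's contents are visible, or miss the
assignment entirely. The canonical shape of the idiom, as a minimal sketch
with hypothetical names:

    import java.util.ArrayList;
    import java.util.List;

    public class DclDemo {
        private volatile List<String> controllers;

        List<String> getControllers() {
            List<String> local = controllers;        // one volatile read
            if (local == null) {
                synchronized (this) {
                    local = controllers;             // re-check under the lock
                    if (local == null) {
                        local = new ArrayList<>();
                        local.add("AccessController");
                        controllers = local;         // safe publication
                    }
                }
            }
            return local;
        }

        public static void main(String[] args) {
            System.out.println(new DclDemo().getControllers());
        }
    }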


[phoenix] branch 5.x-cdh6 updated (f3e17d3 -> 14fcab4)

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a change to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from f3e17d3  PHOENIX-5217 Incorrect result for COUNT DISTINCT limit
 new f2d9659  PHOENIX-5246: PhoenixAccessControllers.getAccessControllers() 
method is not correctly implementing the double-checked locking
 new cbca95d  PHOENIX-5173: LIKE and ILIKE statements return empty result 
list for search without wildcard
 new 5d37370  PhoenixResultSet#next() closes the result set if scanner 
returns null
 new 3ac1a48  PHOENIX-5187 Avoid using FileInputStream and FileOutputStream
 new d55fc51  PHOENIX-5235: Update SQLline version to the latest
 new 045e0bd  PHOENIX-5252 Add job priority option to UpdateStatisticsTool
 new 2f225e3  PHOENIX-5181 support Math sin/cos/tan functions
 new b66d81c  PHOENIX-5195 PHERF:- Handle batch failure in 
connection.commit() in WriteWorkload#upsertData
 new ffc630f  PHOENIX-5199 Pherf overrides user provided properties like 
dataloader threadpool, monitor frequency etc with pherf.properties
 new 30b73ed  PHOENIX-5168 IndexScrutinyTool to output to Table when that 
option is given
 new abe6dfe  PHOENIX-5251: Avoid taking explicit lock by using 
AtomicReference in PhoenixAccessController class
 new 14fcab4  PHOENIX-5213 Phoenix-client improvements: add more 
relocations, exclude log binding, add source jar

The 12 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 phoenix-assembly/pom.xml   |  22 +
 .../src/build/components/all-common-jars.xml   |   4 +-
 phoenix-client/pom.xml | 580 -
 .../phoenix/end2end/IndexScrutinyToolIT.java   |  38 +-
 .../apache/phoenix/end2end/LikeExpressionIT.java   |  24 +
 ...d2EndIT.java => MathTrigFunctionEnd2EndIT.java} |  57 +-
 .../org/apache/phoenix/end2end/QueryLoggerIT.java  | 193 +++
 .../apache/phoenix/cache/ServerCacheClient.java|   9 +-
 .../apache/phoenix/compile/ExpressionCompiler.java |   3 -
 .../coprocessor/PhoenixAccessController.java   |  23 +-
 .../apache/phoenix/expression/ExpressionType.java  |   3 +
 .../{ExpFunction.java => CosFunction.java} |  13 +-
 .../{ExpFunction.java => SinFunction.java} |  13 +-
 .../{ExpFunction.java => TanFunction.java} |  13 +-
 .../org/apache/phoenix/iterate/BufferedQueue.java  |   7 +-
 .../phoenix/iterate/SpoolingResultIterator.java|   4 +-
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  |   4 +-
 .../phoenix/mapreduce/index/IndexScrutinyTool.java |   4 +-
 .../phoenix/schema/stats/UpdateStatisticsTool.java |  32 +-
 .../apache/phoenix/compile/WhereOptimizerTest.java |   8 +-
 .../phoenix/expression/MathTrigFunctionTest.java   | 179 +++
 .../schema/stats/UpdateStatisticsToolTest.java |  15 +
 .../main/java/org/apache/phoenix/pherf/Pherf.java  |   2 +-
 .../phoenix/pherf/workload/WriteWorkload.java  |  36 +-
 pom.xml|   2 +-
 25 files changed, 834 insertions(+), 454 deletions(-)
 copy 
phoenix-core/src/it/java/org/apache/phoenix/end2end/{OctetLengthFunctionEnd2EndIT.java
 => MathTrigFunctionEnd2EndIT.java} (53%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/expression/function/{ExpFunction.java
 => CosFunction.java} (80%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/expression/function/{ExpFunction.java
 => SinFunction.java} (80%)
 copy 
phoenix-core/src/main/java/org/apache/phoenix/expression/function/{ExpFunction.java
 => TanFunction.java} (80%)
 create mode 100644 
phoenix-core/src/test/java/org/apache/phoenix/expression/MathTrigFunctionTest.java



[phoenix] 04/12: PHOENIX-5187 Avoid using FileInputStream and FileOutputStream

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 3ac1a4830a8aa6140c2491dc2417551453baf590
Author: Aman Poonia 
AuthorDate: Mon Mar 11 17:44:23 2019 +

PHOENIX-5187 Avoid using FileInputStream and FileOutputStream
---
 .../main/java/org/apache/phoenix/cache/ServerCacheClient.java| 9 +
 .../src/main/java/org/apache/phoenix/iterate/BufferedQueue.java  | 7 +++
 .../java/org/apache/phoenix/iterate/SpoolingResultIterator.java  | 4 ++--
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index 822e255..bb96637 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -22,9 +22,10 @@ import static 
org.apache.phoenix.util.LogUtil.addCustomAnnotations;
 
 import java.io.Closeable;
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Files;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -148,7 +149,7 @@ public class ServerCacheClient {
 } catch (InsufficientMemoryException e) {
 this.outputFile = 
File.createTempFile("HashJoinCacheSpooler", ".bin", new File(services.getProps()
 .get(QueryServices.SPOOL_DIRECTORY, 
QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY)));
-try (FileOutputStream fio = new 
FileOutputStream(outputFile)) {
+try (OutputStream fio = 
Files.newOutputStream(outputFile.toPath())) {
 fio.write(cachePtr.get(), cachePtr.getOffset(), 
cachePtr.getLength());
 }
 }
@@ -158,7 +159,7 @@ public class ServerCacheClient {
 
 public ImmutableBytesWritable getCachePtr() throws IOException {
 if(this.outputFile!=null){
-try (FileInputStream fio = new FileInputStream(outputFile)) {
+try (InputStream fio = 
Files.newInputStream(outputFile.toPath())) {
 byte[] b = new byte[this.size];
 fio.read(b);
 cachePtr = new ImmutableBytesWritable(b);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BufferedQueue.java 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BufferedQueue.java
index 1a646e6..3352641 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BufferedQueue.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BufferedQueue.java
@@ -23,9 +23,8 @@ import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.file.Files;
 import java.util.AbstractQueue;
 import java.util.Comparator;
 import java.util.Iterator;
@@ -304,7 +303,7 @@ public abstract class BufferedQueue extends 
AbstractQueue implements SizeA
 if (totalResultSize >= thresholdBytes) {
 this.file = File.createTempFile(UUID.randomUUID().toString(), 
null);
 try (DataOutputStream out = new DataOutputStream(
-new BufferedOutputStream(new FileOutputStream(file 
{
+new 
BufferedOutputStream(Files.newOutputStream(file.toPath() {
 int resSize = inMemQueue.size();
 for (int i = 0; i < resSize; i++) {
 T e = inMemQueue.poll();
@@ -342,7 +341,7 @@ public abstract class BufferedQueue extends 
AbstractQueue implements SizeA
 this.next = null;
 try {
 this.in = new DataInputStream(
-new BufferedInputStream(new 
FileInputStream(file)));
+new 
BufferedInputStream(Files.newInputStream(file.toPath(;
 } catch (IOException e) {
 throw new RuntimeException(e);
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java
index fa90b1a..0823026 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SpoolingResultIterator.java
@@ -27,8 +27,8 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
 import java.io.File;
-import java.io.FileInputStream;
 import ja

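The motivation for the swap: on the JDKs these branches target,
FileInputStream and FileOutputStream carry finalizers, so every instance
lingers for an extra GC cycle, while the streams returned by the
java.nio.file.Files factory methods are finalizer-free and close cleanly with
try-with-resources. A self-contained round trip using the replacement API:

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class NioStreamDemo {
        public static void main(String[] args) throws Exception {
            Path tmp = Files.createTempFile("demo", ".bin");
            try (OutputStream out = Files.newOutputStream(tmp)) {
                out.write(new byte[] { 1, 2, 3 });
            }
            try (InputStream in = Files.newInputStream(tmp)) {
                System.out.println(in.read()); // prints 1
            }
            Files.delete(tmp);
        }
    }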
[phoenix] 02/12: PHOENIX-5173: LIKE and ILIKE statements return empty result list for search without wildcard

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit cbca95d870abe1e831f343263bcf750be330198f
Author: s.kadam 
AuthorDate: Fri Apr 19 23:53:54 2019 +0100

PHOENIX-5173: LIKE and ILIKE statements return empty result list for search 
without wildcard
---
 .../apache/phoenix/end2end/LikeExpressionIT.java   | 24 ++
 .../apache/phoenix/compile/ExpressionCompiler.java |  3 ---
 .../apache/phoenix/compile/WhereOptimizerTest.java |  8 ++--
 3 files changed, 30 insertions(+), 5 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java
index 0b061d5..65d55cc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LikeExpressionIT.java
@@ -430,4 +430,28 @@ public class LikeExpressionIT extends 
ParallelStatsDisabledIT {
 rs = select.executeQuery();
 assertFalse(rs.next());
 }
+// associated with the PHOENIX-5173 jira
+@Test
+public void testLikeExpressionWithoutWildcards() throws Exception {
+String table = generateUniqueName();
+final String createTable = "CREATE TABLE "
++ table + " (ID BIGINT NOT NULL PRIMARY KEY, USER_NAME 
VARCHAR(255))";
+final String upsertTable = "UPSERT INTO " + table + " VALUES(1, 'Some 
Name')";
+String likeSelect = "SELECT * FROM " + table + " WHERE USER_NAME LIKE 
'Some Name'";
+String iLikeSelect = "SELECT * FROM " + table + " WHERE USER_NAME 
ILIKE 'soMe nAme'";
+
+try(Connection conn = DriverManager.getConnection(getUrl())) {
+conn.setAutoCommit(true);
+conn.createStatement().execute(createTable);
+conn.createStatement().executeUpdate(upsertTable);
+try(ResultSet rs = 
conn.createStatement().executeQuery(likeSelect)) {
+assertTrue(rs.next());
+assertFalse(rs.next());
+}
+try(ResultSet rs = 
conn.createStatement().executeQuery(iLikeSelect)) {
+assertTrue(rs.next());
+assertFalse(rs.next());
+}
+}
+}
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 3b0f6d7..807c2e2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -507,9 +507,6 @@ public class ExpressionCompiler extends 
UnsupportedAllParseNodeVisitor binds = Arrays.asList(tenantId);
 StatementContext context = compileStatement(query, binds);
 Scan scan = context.getScan();
-assertDegenerate(scan);
+byte[] startRow = ByteUtil.concat(
+
PVarchar.INSTANCE.toBytes(tenantId),StringUtil.padChar(PVarchar.INSTANCE.toBytes(keyPrefix),15));
+assertArrayEquals(startRow, scan.getStartRow());
+byte[] stopRow = ByteUtil.nextKey(startRow);
+assertArrayEquals(stopRow, scan.getStopRow());
 }
 
 @Test

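The user-visible contract being restored: a LIKE pattern that contains no %
or _ behaves as plain equality, and ILIKE as its case-insensitive variant,
instead of matching nothing. A sketch mirroring the new IT, with placeholder
URL and table:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class LikeNoWildcardDemo {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement();
                 // no wildcard, so this is equivalent to USER_NAME = 'Some Name'
                 ResultSet rs = stmt.executeQuery(
                         "SELECT ID FROM USERS WHERE USER_NAME LIKE 'Some Name'")) {
                while (rs.next()) {
                    System.out.println(rs.getLong(1));
                }
            }
        }
    }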


[phoenix] 03/12: PhoenixResultSet#next() closes the result set if scanner returns null

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 5d37370c7c48558f924dce32a6f2c9c5dd52efe6
Author: s.kadam 
AuthorDate: Thu Apr 18 22:05:21 2019 +0100

PhoenixResultSet#next() closes the result set if scanner returns null
---
 .../org/apache/phoenix/end2end/QueryLoggerIT.java  | 193 +++--
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  |   4 +-
 2 files changed, 102 insertions(+), 95 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
index 208eddd..8a08d37 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
@@ -103,44 +103,47 @@ public class QueryLoggerIT extends 
BaseUniqueNamesOwnClusterIT {
 Connection conn = DriverManager.getConnection(getUrl(),props);
 
assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(),LogLevel.DEBUG);
 String query = "SELECT * FROM " + tableName;
-ResultSet rs = conn.createStatement().executeQuery(query);
-StatementContext context = ((PhoenixResultSet)rs).getContext();
-String queryId = context.getQueryLogger().getQueryId();
-while (rs.next()) {
-rs.getString(1);
-rs.getString(2);
+StatementContext context;
+try (ResultSet rs = conn.createStatement().executeQuery(query)) {
+context = ((PhoenixResultSet) rs).getContext();
+while (rs.next()) {
+rs.getString(1);
+rs.getString(2);
+}
 }
-ResultSet explainRS = conn.createStatement().executeQuery("Explain " + 
query);
+String queryId = context.getQueryLogger().getQueryId();
 
 String logQuery = "SELECT * FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_LOG_TABLE + "\"";
 int delay = 5000;
 
 // sleep for sometime to let query log committed
 Thread.sleep(delay);
-rs = conn.createStatement().executeQuery(logQuery);
-boolean foundQueryLog = false;
+try (ResultSet explainRS = 
conn.createStatement().executeQuery("Explain " + query);
+ ResultSet rs = conn.createStatement().executeQuery(logQuery)) {
+boolean foundQueryLog = false;
 
-while (rs.next()) {
-if (rs.getString(QUERY_ID).equals(queryId)) {
-foundQueryLog = true;
-assertEquals(rs.getString(BIND_PARAMETERS), null);
-assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
-assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
-assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
-assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
-assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
-assertEquals(rs.getString(QUERY), query);
-assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
-assertEquals(rs.getString(TENANT_ID), null);
-assertTrue(rs.getString(SCAN_METRICS_JSON)==null);
-assertEquals(rs.getString(EXCEPTION_TRACE),null);
-}else{
-//confirm we are not logging system queries
-
assertFalse(rs.getString(QUERY).toString().contains(SYSTEM_CATALOG_SCHEMA));
+while (rs.next()) {
+if (rs.getString(QUERY_ID).equals(queryId)) {
+foundQueryLog = true;
+assertEquals(rs.getString(BIND_PARAMETERS), null);
+assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
+assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
+assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
+assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
+assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
+assertEquals(rs.getString(QUERY), query);
+assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
+assertEquals(rs.getString(TENANT_ID), null);
+assertTrue(rs.getString(SCAN_METRICS_JSON) == null);
+assertEquals(rs.getString(EXCEPTION_TRACE), null);
+} else {
+//confirm we are not logging system queries
+
assertFalse(rs.getString(QUERY).toString().contains(SYSTEM

[phoenix] 12/12: PHOENIX-5213 Phoenix-client improvements: add more relocations, exclude log binding, add source jar

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 14fcab420f1acc8f4bbc10760f7feb63f735a676
Author: Vincent Poon 
AuthorDate: Sat Apr 27 01:31:51 2019 +0100

PHOENIX-5213 Phoenix-client improvements: add more relocations, exclude log 
binding, add source jar
---
 phoenix-assembly/pom.xml   |  22 +
 .../src/build/components/all-common-jars.xml   |   4 +-
 phoenix-client/pom.xml | 580 -
 3 files changed, 346 insertions(+), 260 deletions(-)

diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index f147ad0..0356040 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -42,6 +42,28 @@
 
   
 
+  <plugin>
+    <artifactId>exec-maven-plugin</artifactId>
+    <groupId>org.codehaus.mojo</groupId>
+    <executions>
+      <execution>
+        <id>Symlink to deprecated client jar name</id>
+        <phase>compile</phase>
+        <goals>
+          <goal>exec</goal>
+        </goals>
+        <configuration>
+          <executable>ln</executable>
+          <workingDirectory>${project.basedir}/../phoenix-client/target</workingDirectory>
+          <arguments>
+            <argument>-fnsv</argument>
+            <argument>phoenix-client-${project.version}.jar</argument>
+            <argument>phoenix-${project.version}-client.jar</argument>
+          </arguments>
+        </configuration>
+      </execution>
+    </executions>
+  </plugin>
   
   
 org.apache.maven.plugins
diff --git a/phoenix-assembly/src/build/components/all-common-jars.xml 
b/phoenix-assembly/src/build/components/all-common-jars.xml
index 08ca29a..d7e8735 100644
--- a/phoenix-assembly/src/build/components/all-common-jars.xml
+++ b/phoenix-assembly/src/build/components/all-common-jars.xml
@@ -27,7 +27,9 @@
   ${project.basedir}/../phoenix-client/target
   /
   
-phoenix-*-client.jar
+phoenix-client-${project.version}.jar
+
+phoenix-${project.version}-client.jar
   
 
 
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index fb0712b..c1bc549 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -57,69 +57,340 @@
   
 org.apache.maven.plugins
 maven-jar-plugin
-
-  phoenix-${project.version}-client
-
   
+
   
 org.apache.maven.plugins
-maven-install-plugin
+maven-shade-plugin
+
+  
+
+
+  csv-bulk-load-config.properties
+  
+${project.basedir}/../config/csv-bulk-load-config.properties
+  
+
+
+  README.md
+  ${project.basedir}/../README.md
+
+
+  LICENSE.txt
+  ${project.basedir}/../LICENSE
+
+
+  NOTICE
+  ${project.basedir}/../NOTICE
+
+  
+  
+
+
+
+  com.beust.jcommander
+  
${shaded.package}.com.beust.jcommander
+
+
+  com.codahale
+  ${shaded.package}.com.codahale
+
+
+  com.fasterxml
+  ${shaded.package}.com.fasterxml
+
+
+  com.google
+  ${shaded.package}.com.google
+
+
+  com.jamesmurty
+  ${shaded.package}.com.jamesmurty
+
+
+  com.jcraft
+  ${shaded.package}.com.jcraft
+
+
+  com.lmax
+  ${shaded.package}.com.lmax
+
+
+  com.sun.xml
+  ${shaded.package}.com.sun.xml
+
+
+  com.sun.istack
+  ${shaded.package}.com.sun.istack
+
+
+  com.sun.research
+  ${shaded.package}.com.sun.research
+
+
+  com.sun.activation
+  
${shaded.package}.com.sun.activation
+
+
+  com.thoughtworks
+  ${shaded.package}.com.thoughtworks
+
+
+  com.yammer
+  ${shaded.package}.com.yammer
+
+
+
+
+  io.netty
+  ${shaded.package}.io.netty
+
+
+
+
+  org.antlr
+  ${shaded.package}.org.antlr
+
+
+  org.aopalliance
+  ${shaded.package}.org.aopalliance
+
+
+  org.codehaus
+  ${shaded.package}.org.codehaus
+
+
+  org.fusesource
+  ${shaded.package}.org.fusesource
+
+
+  org.hamcrest
+  ${shaded.package}.org.hamcrest
+
+
+  org.hsqldb
+  ${shaded.package}.org.hsqldb
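
The relocation list is cut off above. A quick way to sanity-check a shaded
client jar is to probe for a relocated class on the classpath; this sketch
assumes the common Phoenix convention of org.apache.phoenix.shaded as the
${shaded.package} value, which is not shown in this excerpt:

    public class ShadeCheck {
        public static void main(String[] args) {
            // relocated coordinates should resolve, the originals should not
            check("org.apache.phoenix.shaded.com.google.common.collect.ImmutableList");
            check("com.google.common.collect.ImmutableList"); // expected absent
        }

        private static void check(String cls) {
            try {
                Class.forName(cls);
                System.out.println("present: " + cls);
            } catch (ClassNotFoundException e) {
                System.out.println("absent:  " + cls);
            }
        }
    }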

[phoenix] 10/12: PHOENIX-5168 IndexScrutinyTool to output to Table when that option is given

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 30b73ed2f2d796fc9113f7b8847046b36124bc49
Author: Gokcen Iskender 
AuthorDate: Wed Apr 24 21:16:34 2019 +0100

PHOENIX-5168 IndexScrutinyTool to output to Table when that option is given
---
 .../phoenix/end2end/IndexScrutinyToolIT.java   | 38 ++
 .../phoenix/mapreduce/index/IndexScrutinyTool.java |  4 ++-
 2 files changed, 27 insertions(+), 15 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
index 046c3f0..72857e7 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
@@ -10,6 +10,7 @@
  */
 package org.apache.phoenix.end2end;
 
+import static 
org.apache.phoenix.mapreduce.index.IndexScrutinyTableOutput.OUTPUT_TABLE_NAME;
 import static 
org.apache.phoenix.mapreduce.index.PhoenixScrutinyJobCounters.BAD_COVERED_COL_VAL_COUNT;
 import static 
org.apache.phoenix.mapreduce.index.PhoenixScrutinyJobCounters.BATCHES_PROCESSED_COUNT;
 import static 
org.apache.phoenix.mapreduce.index.PhoenixScrutinyJobCounters.INVALID_ROW_COUNT;
@@ -172,6 +173,14 @@ public class IndexScrutinyToolIT {
 protected long getCounterValue(Counters counters, 
Enum counter) {
 return counters.findCounter(counter).getValue();
 }
+
+protected int countRows(Connection conn, String tableFullName) throws 
SQLException {
+ResultSet count = conn.createStatement().executeQuery("select 
count(*) from " + tableFullName);
+count.next();
+int numRows = count.getInt(1);
+return numRows;
+}
+
 }
 
 @RunWith(Parameterized.class) public static class 
IndexScrutinyToolNonTenantIT extends SharedIndexToolIT {
@@ -247,8 +256,8 @@ public class IndexScrutinyToolIT {
 upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
 conn.commit();
 
-int numDataRows = countRows(dataTableFullName);
-int numIndexRows = countRows(indexTableFullName);
+int numDataRows = countRows(conn, dataTableFullName);
+int numIndexRows = countRows(conn, indexTableFullName);
 
 // scrutiny should report everything as ok
List<Job> completedJobs = runScrutiny(schemaName, dataTableName, 
indexTableName);
@@ -259,8 +268,8 @@ public class IndexScrutinyToolIT {
 assertEquals(0, getCounterValue(counters, INVALID_ROW_COUNT));
 
 // make sure row counts weren't modified by scrutiny
-assertEquals(numDataRows, countRows(dataTableFullName));
-assertEquals(numIndexRows, countRows(indexTableFullName));
+assertEquals(numDataRows, countRows(conn, dataTableFullName));
+assertEquals(numIndexRows, countRows(conn, indexTableFullName));
 }
 
 /**
@@ -405,7 +414,7 @@ public class IndexScrutinyToolIT {
 deleteRow(indexTableFullName, "WHERE \":ID\"=" + idToDelete);
 }
 conn.commit();
-int numRows = countRows(indexTableFullName);
+int numRows = countRows(conn, indexTableFullName);
 int numDeleted = numTestRows - numRows;
 
 // run scrutiny with batch size of 10
@@ -683,13 +692,6 @@ public class IndexScrutinyToolIT {
 indexTableFullName = SchemaUtil.getTableName(schemaName, 
indexTableName);
 }
 
-private int countRows(String tableFullName) throws SQLException {
-ResultSet count = conn.createStatement().executeQuery("select 
count(*) from " + tableFullName);
-count.next();
-int numRows = count.getInt(1);
-return numRows;
-}
-
 private void upsertIndexRow(String name, int id, int zip) throws 
SQLException {
 indexTableUpsertStmt.setString(1, name);
 indexTableUpsertStmt.setInt(2, id); // id
@@ -898,9 +900,17 @@ public class IndexScrutinyToolIT {
 * Add 3 rows to Tenant view.
 * Empty index table and observe they are not equal.
 * Use data table as source and output to file.
-* Output to table doesn't work for tenantid connection because it 
can't create the scrutiny table as tenant.
 **/
 @Test public void testWithEmptyIndexTableOutputToFile() throws 
Exception{
+testWithOutput(OutputFormat.FILE);
+}
+
+@Test public void testWithEmptyIndexTableOutputToTable() throws 
Exception{
+testWithOutput(OutputFormat.TABLE);
+assertEquals(3, countRows(connGlobal, OUTPUT_TABLE_NAME));
+}
+
+private void testWithO
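
The promoted countRows(Connection, String) helper is the key enabler here: by
taking the connection as a parameter it can count through either the global or
the tenant-specific connection. An equivalent sketch written with
try-with-resources, so the statement and result set are always closed:

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    final class Rows {
        // same contract as the countRows helper in the diff above
        static int countRows(Connection conn, String tableFullName) throws SQLException {
            try (Statement st = conn.createStatement();
                 ResultSet rs = st.executeQuery("SELECT COUNT(*) FROM " + tableFullName)) {
                rs.next();
                return rs.getInt(1);
            }
        }
    }

Passing the connection explicitly is what lets the tenant-view test reuse the
helper with the global connection, as testWithEmptyIndexTableOutputToTable()
does above.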

[phoenix] 08/12: PHOENIX-5195 PHERF: Handle batch failure in connection.commit() in WriteWorkload#upsertData

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit b66d81c8b55f003db70dd146f891e5e3602d926e
Author: Monani Mihir 
AuthorDate: Fri Mar 15 08:26:35 2019 +

PHOENIX-5195 PHERF: Handle batch failure in connection.commit() in 
WriteWorkload#upsertData
---
 .../phoenix/pherf/workload/WriteWorkload.java  | 32 --
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
index 4023383..c482b3f 100644
--- 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
+++ 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
@@ -24,7 +24,6 @@ import java.sql.Connection;
 import java.sql.Date;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
-import java.sql.Timestamp;
 import java.sql.Types;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
@@ -35,7 +34,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.pherf.PherfConstants;
 import org.apache.phoenix.pherf.PherfConstants.GeneratePhoenixStats;
 import org.apache.phoenix.pherf.configuration.Column;
@@ -294,26 +292,32 @@ public class WriteWorkload implements Workload {
 rowsCreated += result;
 }
 }
-connection.commit();
-duration = System.currentTimeMillis() - last;
-logger.info("Writer (" + 
Thread.currentThread().getName()
-+ ") committed Batch. Total " + 
getBatchSize()
-+ " rows for this thread (" + 
this.hashCode() + ") in ("
-+ duration + ") Ms");
-
-if (i % PherfConstants.LOG_PER_NROWS == 0 && i != 
0) {
-dataLoadThreadTime
-.add(tableName, 
Thread.currentThread().getName(), i,
-System.currentTimeMillis() - 
logStartTime);
-logStartTime = System.currentTimeMillis();
+try {
+connection.commit();
+duration = System.currentTimeMillis() - last;
+logger.info("Writer (" + 
Thread.currentThread().getName()
++ ") committed Batch. Total " + 
getBatchSize()
++ " rows for this thread (" + 
this.hashCode() + ") in ("
++ duration + ") Ms");
+
+if (i % PherfConstants.LOG_PER_NROWS == 0 && i 
!= 0) {
+dataLoadThreadTime.add(tableName,
+Thread.currentThread().getName(), i,
+System.currentTimeMillis() - 
logStartTime);
+}
+} catch (SQLException e) {
+logger.warn("SQLException in commit 
operation", e);
 }
 
+logStartTime = System.currentTimeMillis();
 // Pause for throttling if configured to do so
 Thread.sleep(threadSleepDuration);
 // Re-compute the start time for the next batch
 last = System.currentTimeMillis();
 }
 }
+} catch (SQLException e) {
+throw e;
 } finally {
 // Need to keep the statement open to send the remaining 
batch of updates
 if (!useBatchApi && stmt != null) {
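
The essence of the change: connection.commit() for one batch is now wrapped in
its own try/catch, so a failed batch is logged and skipped instead of unwinding
the whole writer thread. A minimal self-contained sketch of the pattern (table
and column are hypothetical, and java.util.logging stands in for the project's
logger):

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;
    import java.util.List;
    import java.util.logging.Level;
    import java.util.logging.Logger;

    final class ResilientWriter {
        private static final Logger LOG = Logger.getLogger(ResilientWriter.class.getName());

        static void writeAll(Connection conn, List<Integer> ids) throws SQLException {
            try (PreparedStatement ps =
                    conn.prepareStatement("UPSERT INTO T (ID) VALUES (?)")) {
                for (int id : ids) {
                    ps.setInt(1, id);
                    ps.executeUpdate();
                    try {
                        conn.commit(); // a failure loses only this batch
                    } catch (SQLException e) {
                        LOG.log(Level.WARNING, "SQLException in commit operation", e);
                    }
                    // throttling / timing bookkeeping continues here either way
                }
            }
        }
    }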



[phoenix] 06/12: PHOENIX-5252 Add job priority option to UpdateStatisticsTool

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 045e0bdcbea71907d62871c8bd496e9e23a1fec0
Author: Xinyi Yan 
AuthorDate: Sat Apr 20 01:25:02 2019 +0100

PHOENIX-5252 Add job priority option to UpdateStatisticsTool
---
 .../phoenix/schema/stats/UpdateStatisticsTool.java | 32 +-
 .../schema/stats/UpdateStatisticsToolTest.java | 15 ++
 2 files changed, 46 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
index 88b0f0a..110682d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/UpdateStatisticsTool.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.metrics.Gauge;
 import org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobPriority;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable;
 import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
@@ -78,6 +79,8 @@ public class UpdateStatisticsTool extends Configured 
implements Tool {
 "HBase Snapshot Name");
 private static final Option RESTORE_DIR_OPTION = new Option("d", 
"restore-dir", true,
 "Restore Directory for HBase snapshot");
+private static final Option JOB_PRIORITY_OPTION = new Option("p", 
"job-priority", true,
+"Define job priority from 0(highest) to 4");
 private static final Option RUN_FOREGROUND_OPTION =
 new Option("runfg", "run-foreground", false,
 "If specified, runs UpdateStatisticsTool in Foreground. 
Default - Runs the build in background");
@@ -90,6 +93,7 @@ public class UpdateStatisticsTool extends Configured 
implements Tool {
 private String tableName;
 private String snapshotName;
 private Path restoreDir;
+private JobPriority jobPriority;
 private boolean manageSnapshot;
 private boolean isForeground;
 
@@ -164,12 +168,35 @@ public class UpdateStatisticsTool extends Configured 
implements Tool {
 if (restoreDirOptionValue == null) {
 restoreDirOptionValue = getConf().get(FS_DEFAULT_NAME_KEY) + 
"/tmp";
 }
-
+
+jobPriority = getJobPriority(cmdLine);
+
 restoreDir = new Path(restoreDirOptionValue);
 manageSnapshot = cmdLine.hasOption(MANAGE_SNAPSHOT_OPTION.getOpt());
 isForeground = cmdLine.hasOption(RUN_FOREGROUND_OPTION.getOpt());
 }
 
+public String getJobPriority() {
+return this.jobPriority.toString();
+}
+
+private JobPriority getJobPriority(CommandLine cmdLine) {
+String jobPriorityOption = 
cmdLine.getOptionValue(JOB_PRIORITY_OPTION.getOpt());
+ if (jobPriorityOption == null) {
+ return JobPriority.NORMAL;
+ }
+
+ switch (jobPriorityOption) {
+ case "0" : return JobPriority.VERY_HIGH;
+ case "1" : return JobPriority.HIGH;
+ case "2" : return JobPriority.NORMAL;
+ case "3" : return JobPriority.LOW;
+ case "4" : return JobPriority.VERY_LOW;
+ default:
+ return JobPriority.NORMAL;
+ }
+}
+
 private void configureJob() throws Exception {
 job = Job.getInstance(getConf(),
 "UpdateStatistics-" + tableName + "-" + snapshotName);
@@ -187,6 +214,8 @@ public class UpdateStatisticsTool extends Configured 
implements Tool {
 job.setMapOutputValueClass(NullWritable.class);
 job.setOutputFormatClass(NullOutputFormat.class);
 job.setNumReduceTasks(0);
+job.setPriority(this.jobPriority);
+
 TableMapReduceUtil.addDependencyJars(job);
 TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), 
PhoenixConnection.class, Chronology.class,
 CharStream.class, TransactionSystemClient.class, 
TransactionNotInProgressException.class,
@@ -265,6 +294,7 @@ public class UpdateStatisticsTool extends Configured 
implements Tool {
 options.addOption(SNAPSHOT_NAME_OPTION);
 options.addOption(HELP_OPTION);
 options.addOption(RESTORE_DIR_OPTION);
+options.addOption(JOB_PRIORITY_OPTION);
 options.addOption(RUN_FOREGROUND_OPTION);
 options.addOption(MANAGE_SNAPSHOT_OPTION);
 return options;
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/schem
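
Usage-wise, the new flag maps "0" through "4" onto JobPriority.VERY_HIGH down
to JobPriority.VERY_LOW, falling back to NORMAL for anything else. A hedged
launch sketch via ToolRunner; -p and -runfg come from the options above, while
the -t table flag is an assumption not shown in this excerpt:

    import org.apache.hadoop.util.ToolRunner;
    import org.apache.phoenix.schema.stats.UpdateStatisticsTool;

    public class RunStatsWithPriority {
        public static void main(String[] args) throws Exception {
            // "-p 1" selects JobPriority.HIGH per getJobPriority() above;
            // "-t MY_TABLE" is a hypothetical table flag for illustration
            int rc = ToolRunner.run(new UpdateStatisticsTool(),
                    new String[] { "-t", "MY_TABLE", "-p", "1", "-runfg" });
            System.exit(rc);
        }
    }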

[phoenix] 09/12: PHOENIX-5199 Pherf overrides user-provided properties like dataloader threadpool, monitor frequency etc. with pherf.properties

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit ffc630f8d9cade551bddba05a69c40e4f89331fa
Author: Monani Mihir 
AuthorDate: Fri Mar 15 11:18:12 2019 +

PHOENIX-5199 Pherf overrides user-provided properties like dataloader 
threadpool, monitor frequency etc. with pherf.properties
---
 phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java   | 2 +-
 .../main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java| 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index 43061e0..d92ffde 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -156,7 +156,7 @@ public class Pherf {
 writerThreadPoolSize =
 command.getOptionValue("writerThreadSize",
 
properties.getProperty("pherf.default.dataloader.threadpool"));
-properties.setProperty("pherf. default.dataloader.threadpool", 
writerThreadPoolSize);
+properties.setProperty("pherf.default.dataloader.threadpool", 
writerThreadPoolSize);
 label = command.getOptionValue("label", null);
 compareResults = command.getOptionValue("compare", null);
 compareType = command.hasOption("useAverageCompareType") ? 
CompareType.AVERAGE : CompareType.MINIMUM;
diff --git 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
index c482b3f..b340a2b 100644
--- 
a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
+++ 
b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
@@ -84,8 +84,8 @@ public class WriteWorkload implements Workload {
 
 public WriteWorkload(PhoenixUtil phoenixUtil, XMLConfigParser parser, 
Scenario scenario, GeneratePhoenixStats generateStatistics)
 throws Exception {
-this(phoenixUtil, 
PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES,
-false),
+this(phoenixUtil,
+
PherfConstants.create().getProperties(PherfConstants.PHERF_PROPERTIES, true),
 parser, scenario, generateStatistics);
 }
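
Both hunks enforce one rule: a value given on the command line wins, the
pherf.properties default applies otherwise, and the resolved value is written
back under the exact property key (the bug fixed above was a stray space inside
that key). A minimal sketch of the precedence rule:

    import java.util.Properties;

    final class OptionPrecedence {
        // CLI value wins; otherwise keep the properties-file default; either
        // way, write the result back so downstream code reads a single key
        static String resolve(String cliValue, Properties props, String key) {
            String value = (cliValue != null) ? cliValue : props.getProperty(key);
            if (value != null) {
                props.setProperty(key, value);
            }
            return value;
        }
    }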
 



[phoenix] 11/12: PHOENIX-5251: Avoid taking explicit lock by using AtomicReference in PhoenixAccessController class

2019-05-01 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit abe6dfec042f96fc0ce66ce923ef3aaf5c84826a
Author: s.kadam 
AuthorDate: Fri Apr 26 23:41:30 2019 +0100

PHOENIX-5251: Avoid taking explicit lock by using AtomicReference in 
PhoenixAccessController class

Signed-off-by: Geoffrey Jacoby 
---
 .../coprocessor/PhoenixAccessController.java   | 23 +++---
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 1303363..dad663d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -26,6 +26,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -77,7 +78,7 @@ import com.google.protobuf.RpcController;
 public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
 
 private PhoenixMetaDataControllerEnvironment env;
-private volatile ArrayList<MasterObserver> accessControllers;
+AtomicReference<ArrayList<MasterObserver>> accessControllers = new 
AtomicReference<>();
 private boolean accessCheckEnabled;
 private UserProvider userProvider;
 public static final Log LOG = 
LogFactory.getLog(PhoenixAccessController.class);
@@ -90,20 +91,18 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 }
 
 private List<MasterObserver> getAccessControllers() throws IOException {
-if (accessControllers == null) {
-synchronized (this) {
-if (accessControllers == null) {
-accessControllers = new ArrayList<MasterObserver>();
-RegionCoprocessorHost cpHost = 
this.env.getCoprocessorHost();
-for (RegionCoprocessor cp : 
cpHost.findCoprocessors(RegionCoprocessor.class)) {
-if (cp instanceof AccessControlService.Interface && cp 
instanceof MasterObserver) {
-accessControllers.add((MasterObserver)cp);
-}
-}
+ArrayList<MasterObserver> oldAccessControllers = 
accessControllers.get();
+if (oldAccessControllers == null) {
+oldAccessControllers = new ArrayList<>();
+RegionCoprocessorHost cpHost = this.env.getCoprocessorHost();
+for (RegionCoprocessor cp : 
cpHost.findCoprocessors(RegionCoprocessor.class)) {
+if (cp instanceof AccessControlService.Interface && cp 
instanceof MasterObserver) {
+oldAccessControllers.add((MasterObserver)cp);
 }
 }
+accessControllers.set(oldAccessControllers);
 }
-return accessControllers;
+return accessControllers.get();
 }
 
 public ObserverContext 
getMasterObsevrverContext() throws IOException {
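
The pattern is worth isolating: lazy initialization through an AtomicReference
needs no lock when the computed value is idempotent, because the worst case is
two threads building the same list and one publication winning. A generic
sketch; it uses compareAndSet so the first published list sticks, a small
variation on the plain set() above:

    import java.util.List;
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.Supplier;

    final class LazyRef<T> {
        private final AtomicReference<List<T>> ref = new AtomicReference<>();

        List<T> get(Supplier<List<T>> compute) {
            List<T> current = ref.get();
            if (current == null) {
                // duplicate computation is possible but harmless here
                ref.compareAndSet(null, compute.get());
                current = ref.get();
            }
            return current;
        }
    }

If the computation were expensive or had side effects, compareAndSet alone
would not prevent duplicate work; the trade accepted here is lock-free reads.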



svn commit: r1857793 - in /phoenix/site/publish: language/datatypes.html language/functions.html language/index.html team.html

2019-04-19 Thread pboado
Author: pboado
Date: Fri Apr 19 09:43:31 2019
New Revision: 1857793

URL: http://svn.apache.org/viewvc?rev=1857793&view=rev
Log:
updated info for pboado (addendum)

Modified:
phoenix/site/publish/language/datatypes.html
phoenix/site/publish/language/functions.html
phoenix/site/publish/language/index.html
phoenix/site/publish/team.html

Modified: phoenix/site/publish/language/datatypes.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/datatypes.html?rev=1857793&r1=1857792&r2=1857793&view=diff
==
--- phoenix/site/publish/language/datatypes.html (original)
+++ phoenix/site/publish/language/datatypes.html Fri Apr 19 09:43:31 2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/functions.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/functions.html?rev=1857793&r1=1857792&r2=1857793&view=diff
==
--- phoenix/site/publish/language/functions.html (original)
+++ phoenix/site/publish/language/functions.html Fri Apr 19 09:43:31 2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/language/index.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/language/index.html?rev=1857793&r1=1857792&r2=1857793&view=diff
==
--- phoenix/site/publish/language/index.html (original)
+++ phoenix/site/publish/language/index.html Fri Apr 19 09:43:31 2019
@@ -1,7 +1,7 @@
 
 
 
 

Modified: phoenix/site/publish/team.html
URL: 
http://svn.apache.org/viewvc/phoenix/site/publish/team.html?rev=1857793&r1=1857792&r2=1857793&view=diff
==
--- phoenix/site/publish/team.html (original)
+++ phoenix/site/publish/team.html Fri Apr 19 09:43:31 2019
@@ -1,7 +1,7 @@
 
 
 
 
@@ -299,7 +299,7 @@


Pedro Boado  
-   Santander UK  
+   Datadog  
pbo...@apache.org
PMC 





svn commit: r1857792 - /phoenix/site/source/src/site/markdown/team.md

2019-04-19 Thread pboado
Author: pboado
Date: Fri Apr 19 09:41:17 2019
New Revision: 1857792

URL: http://svn.apache.org/viewvc?rev=1857792&view=rev
Log:
updated info for pboado

Modified:
phoenix/site/source/src/site/markdown/team.md

Modified: phoenix/site/source/src/site/markdown/team.md
URL: 
http://svn.apache.org/viewvc/phoenix/site/source/src/site/markdown/team.md?rev=1857792&r1=1857791&r2=1857792&view=diff
==
--- phoenix/site/source/src/site/markdown/team.md (original)
+++ phoenix/site/source/src/site/markdown/team.md Fri Apr 19 09:41:17 2019
@@ -27,7 +27,7 @@ Maryann Xue | Databricks | maryannxue@ap
 Michael Stack | Cloudera | st...@apache.org | PMC
 Mujtaba Chohan | Salesforce | mujt...@apache.org | PMC
 Nick Dimiduk | Icebrg | ndimi...@apache.org | PMC
-Pedro Boado | Santander UK | pbo...@apache.org | PMC
+Pedro Boado | Datadog | pbo...@apache.org | PMC
 Rajeshbabu Chintaguntla | Hortonworks | rajeshb...@apache.org | PMC
 Ramkrishna Vasudevan | Intel | ramkris...@apache.org | PMC
 Ravi Magham | Elementum | ravimag...@apache.org | PMC




[phoenix] 04/07: PHOENIX-4273 MutableIndexSplitIT#testSplitDuringIndexScan is failing for local indexes

2019-04-19 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit b4d72c78232c2bff744b09ff68773f65348f8fe1
Author: Thomas D'Silva 
AuthorDate: Sat Apr 13 01:18:09 2019 +0100

PHOENIX-4273 MutableIndexSplitIT#testSplitDuringIndexScan is failing for 
local indexes
---
 .../apache/phoenix/end2end/index/MutableIndexSplitReverseScanIT.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitReverseScanIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitReverseScanIT.java
index d7afb31..f57dfdd 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitReverseScanIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexSplitReverseScanIT.java
@@ -19,7 +19,8 @@ package org.apache.phoenix.end2end.index;
 
 import org.junit.Test;
 
-public class MutableIndexSplitReverseScanIT extends MutableIndexSplitIT {
+//TODO: re-enable once PHOENIX-4273 is fixed
+public abstract class MutableIndexSplitReverseScanIT extends 
MutableIndexSplitIT {
 
 public MutableIndexSplitReverseScanIT(boolean localIndex, boolean 
multiTenant) {
 super(localIndex, multiTenant);



[phoenix] 03/07: PHOENIX-5194 Thread Cache is not updated for Index retries in MutationState#send()#doMutation()

2019-04-19 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit c21c02dac426c71c3d6aa227ad2c79a5043bb6fa
Author: Monani Mihir 
AuthorDate: Fri Apr 12 18:55:33 2019 +0100

PHOENIX-5194 Thread Cache is not updated for Index retries in 
MutationState#send()#doMutation()
---
 .../org/apache/phoenix/execute/MutationState.java  | 28 ++
 .../phoenix/index/PhoenixIndexFailurePolicy.java   | 10 ++--
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 93ee43d..677b4e0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -993,6 +993,9 @@ public class MutationState implements SQLCloseable {
 if (shouldRetryIndexedMutation) {
 // if there was an index write failure, retry 
the mutation in a loop
 final Table finalHTable = hTable;
+final ImmutableBytesWritable 
finalindexMetaDataPtr =
+indexMetaDataPtr;
+final PTable finalPTable = table;
 
PhoenixIndexFailurePolicy.doBatchWithRetries(new MutateCommand() {
 @Override
 public void doMutation() throws 
IOException {
@@ -1001,6 +1004,9 @@ public class MutationState implements SQLCloseable {
 } catch (InterruptedException e) {
 Thread.currentThread().interrupt();
 throw new IOException(e);
+} catch (IOException e) {
+e = 
updateTableRegionCacheIfNecessary(e);
+throw e;
 }
 }
 
@@ -1008,6 +1014,28 @@ public class MutationState implements SQLCloseable {
 public List<Mutation> getMutationList() {
 return mutationBatch;
 }
+
+private IOException
+
updateTableRegionCacheIfNecessary(IOException ioe) {
+SQLException sqlE =
+
ServerUtil.parseLocalOrRemoteServerException(ioe);
+if (sqlE != null
+&& sqlE.getErrorCode() == 
SQLExceptionCode.INDEX_METADATA_NOT_FOUND
+.getErrorCode()) {
+try {
+
connection.getQueryServices().clearTableRegionCache(
+finalHTable.getName());
+
IndexMetaDataCacheClient.setMetaDataOnMutations(
+connection, finalPTable, 
mutationBatch,
+finalindexMetaDataPtr);
+} catch (SQLException e) {
+return 
ServerUtil.createIOException(
+"Exception during updating 
index meta data cache",
+ioe);
+}
+}
+return ioe;
+}
 }, iwe, connection, 
connection.getQueryServices().getProps());
 } else {
 hTable.batch(mutationBatch, null);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index f0379dd..f13616a 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -488,8 +488,14 @@ public class PhoenixIndexFailurePolicy extends 
DelegateIndexFailurePolicy {
 } catch (IOException e) {
 SQLException inferredE = 
ServerUtil.parseLocalOrRemoteServerException(e);
 if 
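
The message is truncated, but the mechanism is visible above: when a retried
batch fails with INDEX_METADATA_NOT_FOUND, the client clears its region cache
and re-attaches index metadata before the next attempt. A stripped-down sketch
of that retry-with-refresh loop (the refresh hook is left abstract; Phoenix's
version keys it off the specific error code):

    import java.io.IOException;

    final class RetryWithRefresh {
        interface MutateCommand {
            void doMutation() throws IOException;
        }

        interface CacheRefresher {
            // Phoenix refreshes only on INDEX_METADATA_NOT_FOUND, clearing
            // the region cache and re-attaching index metadata
            void refresh(IOException cause);
        }

        static void doBatchWithRetries(MutateCommand cmd, CacheRefresher refresher,
                                       int maxAttempts) throws IOException {
            IOException last = null;
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                try {
                    cmd.doMutation();
                    return; // success
                } catch (IOException e) {
                    last = e;
                    refresher.refresh(e); // give the next attempt fresh metadata
                }
            }
            throw last;
        }
    }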

[phoenix] 02/07: PHOENIX-5138 - ViewIndexId sequences created after PHOENIX-5132 shouldn't collide with ones created before it

2019-04-19 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 2b095720d2b375718d3cce6a2cf3b3cf89227b39
Author: Geoffrey Jacoby 
AuthorDate: Mon Mar 25 23:12:52 2019 +

PHOENIX-5138 - ViewIndexId sequences created after PHOENIX-5132 shouldn't 
collide with ones created before it
---
 .../java/org/apache/phoenix/end2end/UpgradeIT.java | 118 +
 .../apache/phoenix/end2end/index/ViewIndexIT.java  |  10 +-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  48 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |   6 ++
 .../java/org/apache/phoenix/util/MetaDataUtil.java |  30 +-
 .../java/org/apache/phoenix/util/UpgradeUtil.java  | 112 ++-
 6 files changed, 272 insertions(+), 52 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index 632a2bb..b81557b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -21,6 +21,7 @@ import static 
com.google.common.base.Preconditions.checkNotNull;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -30,6 +31,7 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.List;
 import java.util.Properties;
 import java.util.Set;
 import java.util.concurrent.Callable;
@@ -38,6 +40,7 @@ import java.util.concurrent.FutureTask;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import com.google.common.collect.Lists;
 import org.apache.curator.shaded.com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -60,6 +63,9 @@ import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.LinkType;
 import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.schema.SequenceAllocation;
+import org.apache.phoenix.schema.SequenceKey;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -178,25 +184,10 @@ public class UpgradeIT extends ParallelStatsDisabledIT {
 }
 PName tenantId = phxConn.getTenantId();
 PName physicalName = PNameFactory.newName(hbaseTableName);
-String oldSchemaName = 
MetaDataUtil.getViewIndexSequenceSchemaName(PNameFactory.newName(phoenixFullTableName),
-false);
 String newSchemaName = 
MetaDataUtil.getViewIndexSequenceSchemaName(physicalName, true);
 String newSequenceName = 
MetaDataUtil.getViewIndexSequenceName(physicalName, tenantId, true);
-ResultSet rs = phxConn.createStatement()
-.executeQuery("SELECT " + 
PhoenixDatabaseMetaData.CURRENT_VALUE + "  FROM "
-+ PhoenixDatabaseMetaData.SYSTEM_SEQUENCE + " 
WHERE " + PhoenixDatabaseMetaData.TENANT_ID
-+ " IS NULL AND " + 
PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + " = '" + newSchemaName
-+ "' AND " + PhoenixDatabaseMetaData.SEQUENCE_NAME 
+ "='" + newSequenceName + "'");
-assertTrue(rs.next());
-assertEquals("-9223372036854775805", rs.getString(1));
-rs = phxConn.createStatement().executeQuery("SELECT " + 
PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + ","
-+ PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + "," + 
PhoenixDatabaseMetaData.CURRENT_VALUE + "  FROM "
-+ PhoenixDatabaseMetaData.SYSTEM_SEQUENCE + " WHERE " + 
PhoenixDatabaseMetaData.TENANT_ID
-+ " IS NULL AND " + 
PhoenixDatabaseMetaData.SEQUENCE_SCHEMA + " = '" + oldSchemaName + "'");
-assertFalse(rs.next());
-phxConn.close();
+verifySequenceValue(null, newSequenceName, newSchemaName, 
-9223372036854775805L);
 admin.close();
-   
 }
 }
 
@@ -507,12 +498,20 @@ public class UpgradeIT extends ParallelStatsDisabledIT {
 return DriverManager.getConnection(getUrl(), props);
 }
 
-private Connection getConnection(boolean tenantSpecific, String tenantId) 
throws SQLException {
+  
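
The refactored assertions boil down to reading a sequence's CURRENT_VALUE
straight out of SYSTEM."SEQUENCE". A minimal sketch of that lookup, using the
column names visible in the diff:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    final class SequenceCheck {
        // returns null when no such sequence row exists
        static Long currentValue(Connection conn, String schema, String name)
                throws SQLException {
            String sql = "SELECT CURRENT_VALUE FROM SYSTEM.\"SEQUENCE\" "
                    + "WHERE TENANT_ID IS NULL AND SEQUENCE_SCHEMA = ? "
                    + "AND SEQUENCE_NAME = ?";
            try (PreparedStatement ps = conn.prepareStatement(sql)) {
                ps.setString(1, schema);
                ps.setString(2, name);
                try (ResultSet rs = ps.executeQuery()) {
                    return rs.next() ? rs.getLong(1) : null;
                }
            }
        }
    }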

[phoenix] branch 5.x-cdh6 updated (1d8240f -> f3e17d3)

2019-04-19 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a change to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 1d8240f  PHOENIX-4956 Distribution of Apache Phoenix 5.1 for CDH 6.1 
(addendum)
 new 5e24206  PHOENIX-5230 Fix ChangePermissionsIT and TableDDLPermissionIT 
on master
 new 2b09572  PHOENIX-5138 - ViewIndexId sequences created after 
PHOENIX-5132 shouldn't collide with ones created before it
 new c21c02d  PHOENIX-5194 Thread Cache is not updated for Index retries in 
MutationState#send()#doMutation()
 new b4d72c7  PHOENIX-4273 MutableIndexSplitIT#testSplitDuringIndexScan is 
failing for local indexes
 new 2f5e959  PHOENIX-5226 The format of VIEW_MODIFIED_PROPERTY_BYTES is 
incorrect as a tag of the cell
 new 0a5aec8  PHOENIX-5137 check region close before committing a batch for 
index rebuild
 new f3e17d3  PHOENIX-5217 Incorrect result for COUNT DISTINCT limit

The 7 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../apache/phoenix/end2end/BasePermissionsIT.java  | 630 ++---
 .../phoenix/end2end/ChangePermissionsIT.java   | 294 --
 .../apache/phoenix/end2end/DistinctCountIT.java|  28 +
 .../phoenix/end2end/PermissionNSDisabledIT.java|  15 +
 .../phoenix/end2end/PermissionNSEnabledIT.java |  53 ++
 .../phoenix/end2end/SystemTablePermissionsIT.java  | 131 -
 .../phoenix/end2end/TableDDLPermissionsIT.java | 240 
 .../java/org/apache/phoenix/end2end/UpgradeIT.java | 118 +++-
 .../index/MutableIndexSplitReverseScanIT.java  |   3 +-
 .../apache/phoenix/end2end/index/ViewIndexIT.java  |  10 +-
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  54 +-
 .../UngroupedAggregateRegionObserver.java  |  30 +-
 .../org/apache/phoenix/execute/MutationState.java  |  28 +
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  10 +-
 .../phoenix/iterate/BaseResultIterators.java   |  23 +-
 .../phoenix/query/ConnectionQueryServicesImpl.java |   6 +
 .../org/apache/phoenix/query/QueryConstants.java   |   4 +-
 .../java/org/apache/phoenix/util/MetaDataUtil.java |  30 +-
 .../java/org/apache/phoenix/util/UpgradeUtil.java  | 112 +++-
 .../apache/phoenix/compile/QueryCompilerTest.java  |  25 +
 .../tool/ParameterizedPhoenixCanaryToolIT.java |   1 -
 .../org/apache/phoenix/util/MetaDataUtilTest.java  |  42 ++
 .../java/org/apache/phoenix/util/TestUtil.java |  13 +
 23 files changed, 1066 insertions(+), 834 deletions(-)
 delete mode 100644 
phoenix-core/src/it/java/org/apache/phoenix/end2end/ChangePermissionsIT.java
 create mode 100644 
phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionNSDisabledIT.java
 create mode 100644 
phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionNSEnabledIT.java
 delete mode 100644 
phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
 delete mode 100644 
phoenix-core/src/it/java/org/apache/phoenix/end2end/TableDDLPermissionsIT.java



[phoenix] 01/07: PHOENIX-5230 Fix ChangePermissionsIT and TableDDLPermissionIT on master

2019-04-19 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 5e24206b302b8bf309f73b35f9f68ef7111a149e
Author: Thomas D'Silva 
AuthorDate: Fri Apr 5 00:34:24 2019 +0100

PHOENIX-5230 Fix ChangePermissionsIT and TableDDLPermissionIT on master
---
 .../apache/phoenix/end2end/BasePermissionsIT.java  | 630 ++---
 .../phoenix/end2end/ChangePermissionsIT.java   | 294 --
 .../phoenix/end2end/PermissionNSDisabledIT.java|  15 +
 .../phoenix/end2end/PermissionNSEnabledIT.java |  53 ++
 .../phoenix/end2end/SystemTablePermissionsIT.java  | 131 -
 .../phoenix/end2end/TableDDLPermissionsIT.java | 240 
 .../tool/ParameterizedPhoenixCanaryToolIT.java |   1 -
 7 files changed, 610 insertions(+), 754 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index 81a68b4..57a176b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -16,62 +16,66 @@
  */
 package org.apache.phoenix.end2end;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.PrivilegedExceptionAction;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Properties;
-import java.util.Set;
-
+import com.google.common.base.Joiner;
+import com.google.common.base.Throwables;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessControlClient;
 import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
+import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Before;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runners.MethodSorters;
 
-import com.google.common.base.Joiner;
-import com.google.common.base.Throwables;
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
 
-@RunWith(Parameterized.class)
-public class BasePermissionsIT extends BaseTest {
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category(NeedsOwnMiniClusterTest.class)
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public abstract class BasePermissionsIT extends BaseTest {
 
 private static final Log LOG = LogFactory.getLog(BasePermissionsIT.class);
 
-static String SUPERUSER;
+static String SUPER_USER = System.getProperty("user.name");
 
 static HBaseTestingUtility testUtil;
 static final Set<String> PHOENIX_SYSTEM_TABLES =
@@ -98,8 +102,8 @@ pu

[phoenix] 07/07: PHOENIX-5217 Incorrect result for COUNT DISTINCT limit

2019-04-19 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit f3e17d3fab76df109b19cf9ac5fa87dfe6705d77
Author: chenglei 
AuthorDate: Fri Apr 19 07:53:05 2019 +0100

PHOENIX-5217 Incorrect result for COUNT DISTINCT limit
---
 .../apache/phoenix/end2end/DistinctCountIT.java| 28 ++
 .../phoenix/iterate/BaseResultIterators.java   | 23 ++
 .../apache/phoenix/compile/QueryCompilerTest.java  | 25 +++
 .../java/org/apache/phoenix/util/TestUtil.java | 13 ++
 4 files changed, 79 insertions(+), 10 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
index e586ebc..ae86c36 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
@@ -32,6 +32,7 @@ import static org.apache.phoenix.util.TestUtil.ROW7;
 import static org.apache.phoenix.util.TestUtil.ROW8;
 import static org.apache.phoenix.util.TestUtil.ROW9;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.apache.phoenix.util.TestUtil.assertResultSet;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -468,4 +469,31 @@ public class DistinctCountIT extends 
ParallelStatsDisabledIT {
 assertEquals(2, rs.getInt(1));
 conn.close();
 }
+
+@Test
+public void testDistinctCountLimitBug5217() throws Exception {
+Connection conn = null;
+try {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+conn = DriverManager.getConnection(getUrl(), props);
+String tableName = generateUniqueName();
+String sql = "create table " + tableName + "( "+
+" pk1 integer not null , " +
+" pk2 integer not null, " +
+" v integer, " +
+" CONSTRAINT TEST_PK PRIMARY KEY (pk1,pk2))";
+conn.createStatement().execute(sql);
+conn.createStatement().execute("UPSERT INTO 
"+tableName+"(pk1,pk2,v) VALUES (1,1,1)");
+conn.createStatement().execute("UPSERT INTO 
"+tableName+"(pk1,pk2,v) VALUES (2,2,2)");
+conn.commit();
+
+sql = "select count(distinct pk1) from " + tableName + " limit 1";
+ResultSet rs = conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{Long.valueOf(2L)}});
+} finally {
+if(conn!=null) {
+conn.close();
+}
+}
+}
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 7fbb636..a562b8d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
+import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.compile.RowProjector;
 import org.apache.phoenix.compile.ScanRanges;
@@ -262,19 +263,21 @@ public abstract class BaseResultIterators extends 
ExplainTable implements Result
 if(offset!=null){
 ScanUtil.addOffsetAttribute(scan, offset);
 }
-int cols = plan.getGroupBy().getOrderPreservingColumnCount();
+GroupBy groupBy = plan.getGroupBy();
+int cols = groupBy.getOrderPreservingColumnCount();
 if (cols > 0 && keyOnlyFilter &&
 
!plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) &&
 cols < 
plan.getTableRef().getTable().getRowKeySchema().getFieldCount() &&
-plan.getGroupBy().isOrderPreserving() &&
-(context.getAggregationManager().isEmpty() || 
plan.getGroupBy().isUngroupedAggregate())) {
-
-ScanUtil.andFilterAtEnd(scan,
-new 
DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(),
-cols));
-if (plan.getLimit() != null) { // We can push the limit to the 
server
-ScanUtil.andFilterAtEnd(scan, new 
PageFilter(plan.getLimi

[phoenix] 06/07: PHOENIX-5137 check region close before commiting a batch for index rebuild

2019-04-19 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 0a5aec82e1c978306ea2cd13e76caa4ed2e00733
Author: Kiran Kumar Maturi 
AuthorDate: Tue Feb 26 11:39:41 2019 +

PHOENIX-5137 check region close before committing a batch for index rebuild
---
 .../UngroupedAggregateRegionObserver.java  | 30 +-
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 6b27a88..40b6faa 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -392,7 +392,18 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 super.clear();
 }
 }
-
+
+private long getBlockingMemstoreSize(Region region, Configuration conf) {
+long flushSize = region.getTableDescriptor().getMemStoreFlushSize();
+
+if (flushSize <= 0) {
+flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
+TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE);
+}
+return flushSize * 
(conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
+
HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
+}
+
 @Override
 protected RegionScanner doPostScannerOpen(final 
ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final 
RegionScanner s) throws IOException, SQLException {
 RegionCoprocessorEnvironment env = c.getEnvironment();
@@ -524,12 +535,6 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 MutationList mutations = new MutationList();
 boolean needToWrite = false;
 Configuration conf = env.getConfiguration();
-long flushSize = region.getTableDescriptor().getMemStoreFlushSize();
-
-if (flushSize <= 0) {
-flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
-TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE);
-}
 
 /**
  * Slow down the writes if the memstore size more than
@@ -537,9 +542,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
  * bytes. This avoids flush storm to hdfs for cases like index 
building where reads and
  * write happen to all the table regions in the server.
  */
-final long blockingMemStoreSize = flushSize * (
-conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
-
HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
+final long blockingMemStoreSize = getBlockingMemstoreSize(region, 
conf);
 
 boolean buildLocalIndex = indexMaintainers != null && 
dataColumns==null && !localIndexScan;
 if(buildLocalIndex) {
@@ -1101,6 +1104,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 int maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 long maxBatchSizeBytes = 
config.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
 QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
+final long blockingMemstoreSize = getBlockingMemstoreSize(region, 
config);
 MutationList mutations = new MutationList(maxBatchSize);
 region.startRegionOperation();
 byte[] uuidValue = ServerCacheClient.generateId();
@@ -1142,7 +1146,8 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 }
 if (ServerUtil.readyToCommit(mutations.size(), 
mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
-commitBatchWithRetries(region, mutations, -1);
+checkForRegionClosing();
+commitBatchWithRetries(region, mutations, 
blockingMemstoreSize);
 uuidValue = ServerCacheClient.generateId();
 mutations.clear();
 }
@@ -1151,7 +1156,8 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 
 } while (hasMore);
 if (!mutations.isEmpty()) {
-commitBatchWithRetries(region, mutations, -1);
+checkForRegionClosing();
+commitBatchWithRetries(region, mutations, 
blockingMemstoreSize);
 }
 }
 } catch (IOException e) {
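
The extracted helper makes the throttling threshold explicit: the rebuild backs
off once a region's memstore is within one flush of the hard blocking limit.
Isolated below, with HBase's stock defaults as an illustrative calculation:

    final class MemstoreMath {
        // getBlockingMemstoreSize() above, in isolation. With the usual HBase
        // defaults (flush size 128 MB, block multiplier 4) this gives
        // 128 MB * (4 - 1) = 384 MB: commits are throttled one flush short of
        // the point where HBase would block writes outright.
        static long blockingMemstoreSize(long flushSizeBytes, long blockMultiplier) {
            return flushSizeBytes * (blockMultiplier - 1);
        }
    }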



[phoenix] 05/07: PHOENIX-5226 The format of VIEW_MODIFIED_PROPERTY_BYTES is incorrect as a tag of the cell

2019-04-19 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 2f5e959d2f07a596b84085d62c8691f14d7d68c7
Author: jaanai 
AuthorDate: Thu Apr 4 03:21:44 2019 +0100

PHOENIX-5226 The format of VIEW_MODIFIED_PROPERTY_BYTES is incorrect as a 
tag of the cell
---
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  6 ++--
 .../org/apache/phoenix/query/QueryConstants.java   |  4 ++-
 .../org/apache/phoenix/util/MetaDataUtilTest.java  | 42 ++
 3 files changed, 49 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index e7910cf..dd47a7d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -76,6 +76,7 @@ import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_DATA
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES;
 import static 
org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT;
+import static 
org.apache.phoenix.query.QueryConstants.VIEW_MODIFIED_PROPERTY_TAG_TYPE;
 import static org.apache.phoenix.schema.PTableType.INDEX;
 import static org.apache.phoenix.schema.PTableType.TABLE;
 import static org.apache.phoenix.schema.PTableImpl.getColumnsToClone;
@@ -102,6 +103,7 @@ import java.util.Properties;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.CellUtil;
@@ -113,6 +115,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -469,8 +472,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol 
implements RegionCopr
 private static final int LINK_TYPE_INDEX = 0;
 // Used to add a tag to a cell when a view modifies a table property to 
indicate that this
 // property should not be derived from the base table
-private static final byte[] VIEW_MODIFIED_PROPERTY_BYTES = 
Bytes.toBytes(1);
-
+public static final byte[] VIEW_MODIFIED_PROPERTY_BYTES = 
TagUtil.fromList(ImmutableList.of(new 
ArrayBackedTag(VIEW_MODIFIED_PROPERTY_TAG_TYPE, Bytes.toBytes(1;
 private static final Cell CLASS_NAME_KV = 
createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, 
CLASS_NAME_BYTES);
 private static final Cell JAR_PATH_KV = 
createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, JAR_PATH_BYTES);
 private static final Cell RETURN_TYPE_KV = 
createFirstOnRow(ByteUtil.EMPTY_BYTE_ARRAY, TABLE_FAMILY_BYTES, 
RETURN_TYPE_BYTES);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index a8f332c..7584705 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -162,7 +162,9 @@ public interface QueryConstants {
 public static final int NANOS_IN_SECOND = BigDecimal.valueOf(Math.pow(10, 
9)).intValue();
 public static final int DIVERGED_VIEW_BASE_COLUMN_COUNT = -100;
 public static final int BASE_TABLE_BASE_COLUMN_COUNT = -1;
-
+
+// custom TagType
+public static final byte VIEW_MODIFIED_PROPERTY_TAG_TYPE = (byte) 70;
 /**
  * We mark counter values 0 to 10 as reserved. Value 0 is used by {@link 
#ENCODED_EMPTY_COLUMN_NAME}. Values 1-10
  * are reserved for special column qualifiers returned by Phoenix 
co-processors.
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
index 7c8e021..52df041 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/MetaDataUtilTest.java
@@ -17,6 +17,8 @@
  */
 package org.apache.phoenix.util;
 
+import static 
org.apache.phoenix.coprocessor.MetaDataEndpointImpl.VIEW_MODIFIED_PROPERTY_BYTES;
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.UPDATE_CACHE_FREQUENCY_BYTES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
@@ -27,6 +29,9
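
The core of the fix is visible in the diff: a cell tag must be serialized
through HBase's Tag framing (type byte plus length), not written as a bare
value, hence TagUtil.fromList over an ArrayBackedTag. A self-contained sketch
of that construction, using an illustrative tag-type byte:

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hbase.ArrayBackedTag;
    import org.apache.hadoop.hbase.Tag;
    import org.apache.hadoop.hbase.TagUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    final class TagExample {
        // application-level tag type, mirroring the custom value 70 above;
        // custom types must stay clear of HBase's reserved tag types
        static final byte MY_TAG_TYPE = (byte) 70;

        static byte[] viewModifiedTagBytes() {
            List<Tag> tags = Collections.singletonList(
                    new ArrayBackedTag(MY_TAG_TYPE, Bytes.toBytes(1)));
            return TagUtil.fromList(tags); // properly framed tag bytes
        }
    }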

[phoenix] 30/34: PHOENIX-5169 Query logger is still initialized for each query if the log level is off

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit d3e5aefe818fc61f07749e1520cae00d9ee46f10
Author: jaanai 
AuthorDate: Tue Mar 26 07:32:08 2019 +

PHOENIX-5169 Query logger is still initialized for each query if the log 
level is off
---
 .../org/apache/phoenix/end2end/QueryLoggerIT.java  | 25 +-
 .../org/apache/phoenix/jdbc/PhoenixStatement.java  |  5 +
 2 files changed, 20 insertions(+), 10 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
index 2c961e0..b740d18 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
@@ -51,7 +51,9 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixResultSet;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.log.LogLevel;
+import org.apache.phoenix.log.QueryLogger;
 import org.apache.phoenix.log.QueryStatus;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.EnvironmentEdge;
@@ -63,6 +65,9 @@ import org.junit.Test;
 
 import com.google.common.collect.Maps;
 
+import javax.validation.constraints.AssertFalse;
+import javax.validation.constraints.AssertTrue;
+
 public class QueryLoggerIT extends BaseUniqueNamesOwnClusterIT {
 
 
@@ -226,29 +231,29 @@ public class QueryLoggerIT extends 
BaseUniqueNamesOwnClusterIT {
 props.setProperty(QueryServices.LOG_LEVEL, LogLevel.OFF.name());
 Connection conn = DriverManager.getConnection(getUrl(),props);
 
assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(),LogLevel.OFF);
+
+// delete old data
+conn.createStatement().executeUpdate("delete from " + 
SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_LOG_TABLE + "\"");
+conn.commit();
+
 String query = "SELECT * FROM " + tableName;
-
 ResultSet rs = conn.createStatement().executeQuery(query);
 StatementContext context = ((PhoenixResultSet)rs).getContext();
-String queryId = context.getQueryLogger().getQueryId();
+assertEquals(context.getQueryLogger(), QueryLogger.NO_OP_INSTANCE);
 while (rs.next()) {
 rs.getString(1);
 rs.getString(2);
 }
 
-String logQuery = "SELECT * FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_LOG_TABLE + "\"";
+String logQuery = "SELECT count(*) FROM " + SYSTEM_CATALOG_SCHEMA + 
".\"" + SYSTEM_LOG_TABLE + "\"";
 int delay = 5000;
 
 // sleep for sometime to let query log committed
 Thread.sleep(delay);
 rs = conn.createStatement().executeQuery(logQuery);
-boolean foundQueryLog = false;
-while (rs.next()) {
-if (rs.getString(QUERY_ID).equals(queryId)) {
-foundQueryLog = true;
-}
-}
-assertFalse(foundQueryLog);
+assertTrue(rs.next());
+assertEquals(rs.getInt(1), 0);
+assertFalse(rs.next());
 conn.close();
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 7cbc43c..d0d194c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -92,6 +92,7 @@ import org.apache.phoenix.expression.RowKeyColumnExpression;
 import org.apache.phoenix.iterate.MaterializedResultIterator;
 import org.apache.phoenix.iterate.ParallelScanGrouper;
 import org.apache.phoenix.iterate.ResultIterator;
+import org.apache.phoenix.log.LogLevel;
 import org.apache.phoenix.log.QueryLogInfo;
 import org.apache.phoenix.log.QueryStatus;
 import org.apache.phoenix.log.QueryLogger;
@@ -1769,6 +1770,10 @@ public class PhoenixStatement implements Statement, 
SQLCloseable {
 }
 
 public QueryLogger createQueryLogger(CompilableStatement stmt, String sql) 
throws SQLException {
+if (connection.getLogLevel() == LogLevel.OFF) {
+return QueryLogger.NO_OP_INSTANCE;
+}
+
 boolean isSystemTable=false;
 if(stmt instanceof ExecutableSelectStatement){
 TableNode from = ((ExecutableSelectStatement)stmt).getFrom();
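
The guard added above is the whole fix: when the log level is OFF, createQueryLogger hands back one shared no-op logger instead of building a fresh QueryLogger per statement. A minimal self-contained sketch of the same pattern, using stand-in names rather than Phoenix's real classes:

// Sketch only: LogLevel and QueryLogger here are local stand-ins, not Phoenix's API.
public final class NoOpLoggerSketch {

    enum LogLevel { OFF, INFO, DEBUG, TRACE }

    interface QueryLogger {
        void log(String queryId, String sql);
        // One shared, allocation-free instance for the "logging off" path.
        QueryLogger NO_OP_INSTANCE = (queryId, sql) -> { /* deliberately empty */ };
    }

    static QueryLogger createQueryLogger(LogLevel level) {
        if (level == LogLevel.OFF) {
            return QueryLogger.NO_OP_INSTANCE; // early out: no per-query setup
        }
        return (queryId, sql) -> System.out.println(queryId + " -> " + sql);
    }

    public static void main(String[] args) {
        QueryLogger logger = createQueryLogger(LogLevel.OFF);
        // Every OFF-level call returns the same object, which is why the test
        // above can assert equality against NO_OP_INSTANCE.
        System.out.println(logger == QueryLogger.NO_OP_INSTANCE); // true
    }
}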



[phoenix] 17/34: PHOENIX-5196 Fix rat check in pre commit

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 61924e5723a80fcedb950e29c6d3db2bed631828
Author: Abhishek Singh Chouhan 
AuthorDate: Wed Mar 13 22:42:33 2019 +

PHOENIX-5196 Fix rat check in pre commit
---
 bin/omid-server-configuration.yml| 16 
 .../it/java/org/apache/phoenix/spark/OrderByIT.java  | 17 +
 .../datasource/v2/writer/PhoenixDataWriter.java  | 17 +
 .../v2/writer/PhoenixDataWriterFactory.java  | 17 +
 .../v2/writer/PhoenixDatasourceWriter.java   | 17 +
 .../org.apache.spark.sql.sources.DataSourceRegister  | 19 ++-
 .../datasources/jdbc/PhoenixJdbcDialect.scala| 20 +++-
 7 files changed, 121 insertions(+), 2 deletions(-)

diff --git a/bin/omid-server-configuration.yml 
b/bin/omid-server-configuration.yml
index 8d1616e..470d791 100644
--- a/bin/omid-server-configuration.yml
+++ b/bin/omid-server-configuration.yml
@@ -1,3 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 # 
=
 #
 # Omid TSO Server Configuration
diff --git a/phoenix-spark/src/it/java/org/apache/phoenix/spark/OrderByIT.java 
b/phoenix-spark/src/it/java/org/apache/phoenix/spark/OrderByIT.java
index 4c60bc8..e44b011 100644
--- a/phoenix-spark/src/it/java/org/apache/phoenix/spark/OrderByIT.java
+++ b/phoenix-spark/src/it/java/org/apache/phoenix/spark/OrderByIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.spark;
 
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
diff --git 
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/writer/PhoenixDataWriter.java
 
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/writer/PhoenixDataWriter.java
index cf42aa5..32dc07a 100644
--- 
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/writer/PhoenixDataWriter.java
+++ 
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/writer/PhoenixDataWriter.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.spark.datasource.v2.writer;
 
 import java.io.IOException;
diff --git 
a/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2/writer/PhoenixDataWriterFactory.java
 
b/phoenix-spark/src/main/java/org/apache/phoenix/spark/datasource/v2

[phoenix] 22/34: PHOENIX-4900 Modify MAX_MUTATION_SIZE_EXCEEDED and MAX_MUTATION_SIZE_BYTES_EXCEEDED exception message to recommend turning autocommit on for deletes

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit cfaea97c21c50bd106076d484b3064bfcb94
Author: Xinyi Yan 
AuthorDate: Tue Mar 5 22:58:23 2019 +

PHOENIX-4900 Modify MAX_MUTATION_SIZE_EXCEEDED and MAX_MUTATION_SIZE_BYTES_EXCEEDED exception message to recommend turning autocommit on for deletes
---
 .../apache/phoenix/end2end/MutationStateIT.java| 50 +-
 .../apache/phoenix/exception/SQLExceptionCode.java |  8 +++-
 2 files changed, 55 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
index 15460b8..70c37a1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
@@ -59,7 +59,51 @@ public class MutationStateIT extends ParallelStatsDisabledIT 
{
 }
 
 @Test
-public void testMaxMutationSize() throws Exception {
+public void testDeleteMaxMutationSize() throws SQLException {
+String tableName = generateUniqueName();
+int NUMBER_OF_ROWS = 20;
+String ddl = "CREATE TABLE " + tableName + " (V BIGINT PRIMARY KEY, K 
BIGINT)";
+PhoenixConnection conn = (PhoenixConnection) 
DriverManager.getConnection(getUrl());
+conn.createStatement().execute(ddl);
+
+for(int i = 0; i < NUMBER_OF_ROWS; i++) {
+conn.createStatement().execute(
+"UPSERT INTO " + tableName + " VALUES (" + i + ", "+ i + 
")");
+conn.commit();
+}
+
+Properties props = new Properties();
+props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
+String.valueOf(NUMBER_OF_ROWS / 2));
+PhoenixConnection connection =
+(PhoenixConnection) DriverManager.getConnection(getUrl(), 
props);
+connection.setAutoCommit(false);
+
+try {
+for(int i = 0; i < NUMBER_OF_ROWS; i++) {
+connection.createStatement().execute(
+"DELETE FROM " + tableName + " WHERE K = " + i );
+}
+} catch (SQLException e) {
+assertTrue(e.getMessage().contains(
+SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getMessage()));
+}
+
+props.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "10");
+props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "1");
+connection = (PhoenixConnection) DriverManager.getConnection(getUrl(), 
props);
+connection.setAutoCommit(false);
+
+try {
+connection.createStatement().execute("DELETE FROM " + tableName );
+} catch (SQLException e) {
+assertTrue(e.getMessage().contains(
+
SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getMessage()));
+}
+}
+
+@Test
+public void testUpsertMaxMutationSize() throws Exception {
 Properties connectionProperties = new Properties();
 
connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3");
 
connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, 
"100");
@@ -76,6 +120,8 @@ public class MutationStateIT extends ParallelStatsDisabledIT 
{
 } catch (SQLException e) {
 
assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getErrorCode(),
 e.getErrorCode());
+assertTrue(e.getMessage().contains(
+SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getMessage()));
 }
 
 // set the max mutation size (bytes) to a low value
@@ -89,6 +135,8 @@ public class MutationStateIT extends ParallelStatsDisabledIT 
{
 } catch (SQLException e) {
 
assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getErrorCode(),
 e.getErrorCode());
+assertTrue(e.getMessage().contains(
+
SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getMessage()));
 }
 }
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java 
b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 00479ee..b773649 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -466,8 +466,12 @@ public enum SQLExceptionCode {
 "because this client already has the maximum number" +
 " of connections to the target cluster."),
 
-MAX_MUTATION_SIZE_EXCEEDED(729, "LIM01",

[phoenix] 09/34: PHOENIX-5063 Create a new repo for the phoenix query server (#454)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 57df4c8d42bcec93a944bcbec07d2907737c0c06
Author: karanmehta93 
AuthorDate: Wed Mar 6 01:17:59 2019 +

PHOENIX-5063 Create a new repo for the phoenix query server (#454)

Removed phoenix-load-balancer module
---
 phoenix-assembly/pom.xml   |  12 --
 phoenix-load-balancer/pom.xml  |  96 ---
 .../phoenix/end2end/LoadBalancerEnd2EndIT.java | 144 -
 .../service/LoadBalanceZookeeperConfImpl.java  | 103 
 .../phoenix/loadbalancer/service/LoadBalancer.java | 178 -
 .../queryserver/register/ZookeeperRegistry.java|  72 -
 ...x.loadbalancer.service.LoadBalanceZookeeperConf |   1 -
 ...rg.apache.phoenix.queryserver.register.Registry |   1 -
 pom.xml|   6 -
 9 files changed, 613 deletions(-)

diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index ab46a82..592df78 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -119,18 +119,6 @@
       <artifactId>phoenix-spark</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-queryserver</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-queryserver-client</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-load-balancer</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.omid</groupId>
       <artifactId>omid-hbase-tools-hbase2.x</artifactId>
       <version>${omid.version}</version>
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
deleted file mode 100644
index 393a68c..000
--- a/phoenix-load-balancer/pom.xml
+++ /dev/null
@@ -1,96 +0,0 @@
-<?xml version='1.0'?>
-<!-- ASF license header -->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.phoenix</groupId>
-    <artifactId>phoenix</artifactId>
-    <version>5.1.0-cdh6.1.1-SNAPSHOT</version>
-  </parent>
-  <artifactId>phoenix-load-balancer</artifactId>
-  <name>Phoenix Load Balancer</name>
-  <description>A Load balancer which routes calls to Phoenix Query Server</description>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.curator</groupId>
-      <artifactId>curator-test</artifactId>
-      <version>${curator.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.curator</groupId>
-      <artifactId>curator-client</artifactId>
-      <version>${curator.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.curator</groupId>
-      <artifactId>curator-recipes</artifactId>
-      <version>${curator.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.curator</groupId>
-      <artifactId>curator-framework</artifactId>
-      <version>${curator.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-queryserver</artifactId>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <artifactId>maven-source-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>attach-sources</id>
-            <phase>verify</phase>
-            <goals>
-              <goal>jar-no-fork</goal>
-              <goal>test-jar-no-fork</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>src/main/resources/META-INF/services/org.apache.phoenix.loadbalancer.service.LoadBalanceZookeeperConf</exclude>
-            <exclude>src/main/resources/META-INF/services/org.apache.phoenix.queryserver.register.Registry</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git 
a/phoenix-load-balancer/src/it/java/org/apache/phoenix/end2end/LoadBalancerEnd2EndIT.java
 
b/phoenix-load-balancer/src/it/java/org/apache/phoenix/end2end/LoadBalancerEnd2EndIT.java
deleted file mode 100644
index 8aa516b..000
--- 
a/phoenix-load-balancer/src/it/java/org/apache/phoenix/end2end/LoadBalancerEnd2EndIT.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import com.google.common.net.HostAndPort;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.curator.CuratorZookeeperClient;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.apache.curator.test.TestingServer;
-import org.apache.curator.utils.CloseableUtils;
-import org.apache.phoenix.loadbalan

[phoenix] 05/34: PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory (addendum)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit e899c3bbad0eb34613513f6ff262916e155f4980
Author: Thomas D'Silva 
AuthorDate: Fri Mar 1 22:24:10 2019 +

PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory (addendum)
---
 .../main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala| 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
index ac3993a..3b0289d 100644
--- 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
+++ 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
@@ -28,7 +28,7 @@ class DataFrameFunctions(data: DataFrame) extends 
Serializable {
saveToPhoenix(parameters("table"), zkUrl = 
parameters.get("zkUrl"), tenantId = parameters.get("TenantId"), 

skipNormalizingIdentifier=parameters.contains("skipNormalizingIdentifier"))
}
-  def saveToPhoenix(tableName: String, conf: Option[Configuration] = None,
+  def saveToPhoenix(tableName: String, conf: Configuration = new Configuration,
 zkUrl: Option[String] = None, tenantId: Option[String] = 
None, skipNormalizingIdentifier: Boolean = false): Unit = {
 
 // Retrieve the schema field names and normalize to Phoenix, need to do 
this outside of mapPartitions
@@ -36,7 +36,7 @@ class DataFrameFunctions(data: DataFrame) extends 
Serializable {
 
 
 // Create a configuration object to use for saving
-@transient val outConfig = 
ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrl, 
tenantId, conf)
+@transient val outConfig = 
ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrl, 
tenantId, Some(conf))
 
 // Retrieve the zookeeper URL
 val zkUrlFinal = ConfigurationUtil.getZookeeperURL(outConfig)
@@ -47,7 +47,7 @@ class DataFrameFunctions(data: DataFrame) extends 
Serializable {
// Create a within-partition config to retrieve the ColumnInfo list
@transient val partitionConfig = 
ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrlFinal, 
tenantId)
@transient val columns = 
PhoenixConfigurationUtil.getUpsertColumnMetadataList(partitionConfig).toList
-
+ 
rows.map { row =>
  val rec = new PhoenixRecordWritable(columns)
  row.toSeq.foreach { e => rec.add(e) }



[phoenix] 01/34: PHOENIX-5089 Add tenantId parameter to IndexScrutinyTool

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit e62d06126733ccf71c256448dd7e0580e25ce411
Author: Gokcen Iskender 
AuthorDate: Fri Feb 15 22:07:01 2019 +

PHOENIX-5089 Add tenantId parameter to IndexScrutinyTool

Signed-off-by: Geoffrey Jacoby 
---
 .../phoenix/end2end/IndexScrutinyToolIT.java   | 1398 +++-
 .../mapreduce/index/IndexScrutinyMapper.java   |2 +
 .../mapreduce/index/IndexScrutinyTableOutput.java  |1 +
 .../phoenix/mapreduce/index/IndexScrutinyTool.java |   61 +-
 .../apache/phoenix/mapreduce/index/IndexTool.java  |5 +-
 .../phoenix/mapreduce/util/IndexColumnNames.java   |3 +
 6 files changed, 820 insertions(+), 650 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
index 692a98c..046c3f0 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexScrutinyToolIT.java
@@ -46,9 +46,12 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.CsvBulkImportUtil;
 import org.apache.phoenix.mapreduce.index.IndexScrutinyTableOutput;
 import org.apache.phoenix.mapreduce.index.IndexScrutinyTool;
@@ -58,6 +61,7 @@ import 
org.apache.phoenix.mapreduce.index.PhoenixScrutinyJobCounters;
 import org.apache.phoenix.mapreduce.index.SourceTargetColumnNames;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -70,6 +74,7 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.experimental.runners.Enclosed;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
@@ -81,681 +86,850 @@ import com.google.common.collect.Sets;
  * Tests for the {@link IndexScrutinyTool}
  */
 @Category(NeedsOwnMiniClusterTest.class)
-@RunWith(Parameterized.class)
-public class IndexScrutinyToolIT extends BaseTest {
+@RunWith(Enclosed.class)
+public class IndexScrutinyToolIT {
+
+abstract public static class SharedIndexToolIT extends BaseTest {
+protected String outputDir;
+
+@BeforeClass public static void doSetup() throws Exception {
+Map serverProps = Maps.newHashMap();
+//disable major compactions
+serverProps.put(HConstants.MAJOR_COMPACTION_PERIOD, "0");
+Map clientProps = Maps.newHashMap();
+setUpTestDriver(new 
ReadOnlyProps(serverProps.entrySet().iterator()),
+new ReadOnlyProps(clientProps.entrySet().iterator()));
+}
 
-private String dataTableDdl;
-private String indexTableDdl;
+protected List runScrutiny(String[] cmdArgs) throws Exception {
+IndexScrutinyTool scrutiny = new IndexScrutinyTool();
+Configuration conf = new 
Configuration(getUtility().getConfiguration());
+scrutiny.setConf(conf);
+int status = scrutiny.run(cmdArgs);
+assertEquals(0, status);
+for (Job job : scrutiny.getJobs()) {
+assertTrue(job.waitForCompletion(true));
+}
+return scrutiny.getJobs();
+}
 
-private static final String UPSERT_SQL = "UPSERT INTO %s VALUES(?,?,?,?)";
+protected String[] getArgValues(String schemaName, String dataTable, 
String indxTable, Long batchSize,
+SourceTable sourceTable, boolean outputInvalidRows, 
OutputFormat outputFormat, Long maxOutputRows, String tenantId, Long 
scrutinyTs) {
+final List args = Lists.newArrayList();
+if (schemaName != null) {
+args.add("-s");
+args.add(schemaName);
+}
+args.add("-dt");
+args.add(dataTable);
+args.add("-it");
+args.add(indxTable);
+
+// TODO test snapshot reads
+// if(useSnapshot) {
+// args.add("-snap");
+// }
+
+if (OutputFormat.FILE.equals(outputFormat)) {
+   

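The getArgValues() helper above spells out the tool's CLI surface: -s for the schema, -dt for the data table, -it for the index table, plus output options. A hedged sketch of driving the tool programmatically the same way runScrutiny() does; the schema and table names below are illustrative, not from the commit:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.phoenix.mapreduce.index.IndexScrutinyTool;

public class ScrutinyDriverSketch {
    public static void main(String[] args) throws Exception {
        IndexScrutinyTool tool = new IndexScrutinyTool();
        tool.setConf(HBaseConfiguration.create()); // assumes a reachable cluster config
        int status = tool.run(new String[] {
                "-s",  "MY_SCHEMA",      // optional schema
                "-dt", "DATA_TABLE",     // data table to scrutinize
                "-it", "DATA_TABLE_IDX"  // index table to compare against
        });
        System.exit(status); // 0 on success, as runScrutiny() asserts
    }
}
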
[phoenix] branch 5.x-cdh6 updated (16248b3 -> 1d8240f)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a change to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 16248b3  PHOENIX-4956 Distribution of Apache Phoenix 5.1 for CDH 6.1
 new e62d061  PHOENIX-5089 Add tenantId parameter to IndexScrutinyTool
 new aef34e6  PHOENIX-374 Enable access to dynamic columns in * or cf.* 
selection (Addendum)
 new d066f40  PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory 
to get the config in PhoenixRDD (addendum)
 new c9447c6  PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory 
(addendum)
 new e899c3b  PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory 
(addendum)
 new 597ee03  PHOENIX-2265 Disallow creation of view over HBase table if PK 
not specified
 new 8348457  PHOENIX-4345 Error message for incorrect index is not accurate
 new 855cb66  PHOENIX-5063 Create a new repo for the phoenix query server 
(#422)
 new 57df4c8  PHOENIX-5063 Create a new repo for the phoenix query server 
(#454)
 new 4cb9269  PHOENIX-4929 IndexOutOfBoundsException when casting timestamp 
to date
 new 3c2b20b  PHOENIX-5122: PHOENIX-4322 breaks client backward 
compatibility
 new ee22c65  PHOENIX-5175 Separate client settings for disabling server 
side mutations for upserts and deletes
 new 3b52902  PHOENIX-5182: Deprecate getTotalSum API of the GlobalMetric 
interface
 new 1468979  PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server
 new e612d97  PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server(addendum)
 new ae45a00  PHOENIX-5188 IndexedKeyValue should populate KeyValue fields
 new 61924e5  PHOENIX-5196 Fix rat check in pre commit
 new a26f6fc  PHOENIX-5185 support Math PI function (#461)
 new b6ce5c0  PHOENIX-5131 Make spilling to disk for order/group by 
configurable
 new 3920a21  PHOENIX-5148 Improve OrderPreservingTracker to optimize 
OrderBy/GroupBy for ClientScanPlan and ClientAggregatePlan
 new 814999c  PHOENIX-5062 Create a new repo for the phoenix connectors
 new cfaea97  PHOENIX-4900 Modify MAX_MUTATION_SIZE_EXCEEDED and 
MAX_MUTATION_SIZE_BYTES_EXCEEDED exception message to recommend turning 
autocommit on for deletes
 new 02ec040  PHOENIX-5184 HBase and Phoenix connection leaks in Indexing 
code path, OrphanViewTool and PhoenixConfigurationUtil
 new 5171199  PHOENIX-5172 Harden the PQS canary synth test tool with retry 
mechanism and more logging
 new 2ddfa02  PHOENIX-1614 ALTER TABLE ADD IF NOT EXISTS doesn't work as 
expected
 new 4a7cc09  PHOENIX-5180 Add API to PhoenixRunTime to get ptable of a 
tenant using a global connection
 new 263245f  PHOENIX-5062 Create a new repo for the phoenix connectors 
(addendum)
 new fabe30d  PHOENIX-5207 Create index if not exists fails incorrectly if 
table has 'maxIndexesPerTable' indexes already
 new fe6e767  PHOENIX-5219 Fix ConcurrentMutationsIT 
testLockUntilMVCCAdvanced and testRowLockDuringPreBatchMutateWhenIndexed 
failures on the master branch
 new d3e5aef  PHOENIX-5169 Query logger is still initialized for each query 
if the log level is off
 new 8a5879d  PHOENIX-5169 Query logger is still initialized for each query 
if the log level is off(addendum)
 new 6419394  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
 new 8cedf24  PHOENIX-5190 Implement TaskRegionObserver for Index rebuild
 new 1d8240f  PHOENIX-4956 Distribution of Apache Phoenix 5.1 for CDH 6.1 
(addendum)

The 34 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 bin/omid-server-configuration.yml  |   16 +
 phoenix-assembly/pom.xml   |   28 -
 phoenix-client/pom.xml |   18 -
 .../org/apache/phoenix/end2end/AlterTableIT.java   |   43 +-
 .../org/apache/phoenix/end2end/DateTimeIT.java |   28 +
 .../java/org/apache/phoenix/end2end/DeleteIT.java  |   32 +-
 .../org/apache/phoenix/end2end/DerivedTableIT.java | 2002 +-
 .../phoenix/end2end/DropTableWithViewsIT.java  |   37 +-
 .../end2end/GlobalConnectionTenantTableIT.java |  188 ++
 .../phoenix/end2end/IndexBuildTimestampIT.java |3 +-
 .../apache/phoenix/end2end/IndexRebuildTaskIT.java |  176 ++
 .../phoenix/end2end/IndexScrutinyToolIT.java   | 1398 +++--
 .../phoenix/end2end/LnLogFunctionEnd2EndIT.java|   16 -
 .../phoenix/end2end/MathPIFunctionEnd2EndIT.java   |   61 +
 .../apache/phoenix/end2end/MutationStateIT.java|   50 +-
 .../java/org/apache/phoenix/end2end/OrderByIT.java |  193 ++
 ...OrderByWithServerClientSpoolingDisabledIT.java} |   17 +-
 .../end2end/OrderByWithServerMemoryLimitIT.java|

[phoenix] 06/34: PHOENIX-2265 Disallow creation of view over HBase table if PK not specified

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 597ee0328a275a86b266f2a90cb8be59b60db99b
Author: Xinyi Yan 
AuthorDate: Sat Feb 9 00:05:55 2019 +

PHOENIX-2265 Disallow creation of view over HBase table if PK not specified

Signed-off-by: Geoffrey Jacoby 
---
 .../it/java/org/apache/phoenix/end2end/ViewIT.java | 51 +-
 .../phoenix/compile/CreateTableCompiler.java   | 17 
 2 files changed, 67 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index bcf7eca..4daf012 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -57,6 +57,8 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.curator.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -73,6 +75,7 @@ import org.apache.phoenix.exception.PhoenixIOException;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.ColumnAlreadyExistsException;
@@ -1368,7 +1371,53 @@ public class ViewIT extends SplitSystemCatalogIT {
 // we should be able to load the second view
 PhoenixRuntime.getTableNoCache(conn, fullViewName2);
 }
-
+
+@Test
+public void testCreateViewFromHBaseTable() throws Exception {
+String tableNameStr = generateUniqueName();
+String familyNameStr = generateUniqueName();
+
+TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(
+TableName.valueOf(tableNameStr));
+
builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyNameStr));
+
+HBaseTestingUtility testUtil = getUtility();
+Admin admin = testUtil.getAdmin();
+admin.createTable(builder.build());
+Connection conn = DriverManager.getConnection(getUrl());
+
+//PK is not specified, without where clause
+try {
+conn.createStatement().executeUpdate("CREATE VIEW \"" + 
tableNameStr +
+"\" (ROWKEY VARCHAR, \"" + familyNameStr + "\".a 
VARCHAR)");
+fail();
+} catch (SQLException e) {
+assertEquals(SQLExceptionCode.PRIMARY_KEY_MISSING.getErrorCode(), 
e.getErrorCode());
+}
+
+// No error, as PK is specified
+conn.createStatement().executeUpdate("CREATE VIEW \"" + tableNameStr +
+"\" (ROWKEY VARCHAR PRIMARY KEY, \"" + familyNameStr + "\".a 
VARCHAR)");
+
+conn.createStatement().executeUpdate("DROP VIEW \"" + tableNameStr + 
"\"");
+
+//PK is not specified, with where clause
+try {
+conn.createStatement().executeUpdate("CREATE VIEW \"" + 
tableNameStr +
+"\" (ROWKEY VARCHAR, \"" + familyNameStr + "\".a VARCHAR) 
AS SELECT * FROM \""
++ tableNameStr + "\" WHERE ROWKEY = '1'");
+fail();
+} catch (SQLException e) {
+assertEquals(SQLExceptionCode.PRIMARY_KEY_MISSING.getErrorCode(), 
e.getErrorCode());
+}
+
+conn.createStatement().executeUpdate("CREATE VIEW \"" + tableNameStr +
+"\" (ROWKEY VARCHAR PRIMARY KEY, \"" + familyNameStr + "\".a 
VARCHAR) AS SELECT " +
+"* FROM \"" + tableNameStr + "\" WHERE ROWKEY = '1'");
+
+conn.createStatement().executeUpdate("DROP VIEW \"" + tableNameStr + 
"\"");
+}
+
 @Test
 public void testConcurrentViewCreationAndTableDrop() throws Exception {
 try (Connection conn = DriverManager.getConnection(getUrl())) {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
index 5293f8a..6329467 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
+++ 
b/phoenix-core/src/m

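The new compile-time check only fires when no PRIMARY KEY column is declared for the mapped row key. A hedged JDBC sketch of both sides of the rule, assuming an already-created HBase table "t1" with column family "cf1" and a cluster at jdbc:phoenix:localhost:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class ViewOverHBaseSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            try {
                // Rejected after this change: no PRIMARY KEY column maps the row key.
                conn.createStatement().executeUpdate(
                        "CREATE VIEW \"t1\" (ROWKEY VARCHAR, \"cf1\".A VARCHAR)");
            } catch (SQLException e) {
                System.out.println("rejected with error code " + e.getErrorCode());
            }
            // Accepted: the HBase row key is bound to an explicit PK column.
            conn.createStatement().executeUpdate(
                    "CREATE VIEW \"t1\" (ROWKEY VARCHAR PRIMARY KEY, \"cf1\".A VARCHAR)");
        }
    }
}
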
[phoenix] 07/34: PHOENIX-4345 Error message for incorrect index is not accurate

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 8348457665230dd0a5c8b2f268a5b70bfdae7385
Author: Xinyi Yan 
AuthorDate: Thu Feb 28 07:33:23 2019 +

PHOENIX-4345 Error message for incorrect index is not accurate

Signed-off-by: Geoffrey Jacoby 
---
 .../apache/phoenix/end2end/index/IndexUsageIT.java | 40 ++
 .../org/apache/phoenix/compile/FromCompiler.java   | 12 ++-
 .../apache/phoenix/exception/SQLExceptionCode.java |  7 
 ...dException.java => IndexNotFoundException.java} | 35 +--
 .../org/apache/phoenix/schema/MetaDataClient.java  |  7 +++-
 .../phoenix/schema/TableNotFoundException.java |  4 +++
 6 files changed, 77 insertions(+), 28 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexUsageIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexUsageIT.java
index f114010..6433f5a 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexUsageIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexUsageIT.java
@@ -35,6 +35,7 @@ import java.sql.SQLException;
 import java.util.Properties;
 
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.execute.CommitException;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.util.DateUtil;
@@ -772,4 +773,43 @@ public class IndexUsageIT extends ParallelStatsDisabledIT {
 }
}
 
+@Test
+public void testIndexNotFoundForWrongIndexNameRebuild() throws Exception{
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+String dataTableName = generateUniqueName();
+String wrongIndexName = generateUniqueName();
+
+try {
+conn.createStatement().execute("CREATE TABLE " + dataTableName +
+" (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR)");
+
+conn.createStatement().execute(
+"ALTER INDEX " + wrongIndexName + " ON " + dataTableName + 
" rebuild");
+
+}catch (SQLException e) {
+assertEquals(e.getErrorCode(), 
SQLExceptionCode.INDEX_UNDEFINED.getErrorCode());
+} finally {
+conn.close();
+}
+}
+
+@Test
+public void testIndexNotFoundForDropWongIndexName() throws Exception{
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+String dataTableName = generateUniqueName();
+String wrongIndexName = generateUniqueName();
+
+try {
+conn.createStatement().execute("CREATE TABLE " + dataTableName +
+" (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR)");
+conn.createStatement().execute("DROP INDEX " + wrongIndexName + " 
ON " +
+dataTableName);
+}catch (SQLException e) {
+assertEquals(e.getErrorCode(), 
SQLExceptionCode.INDEX_UNDEFINED.getErrorCode());
+} finally {
+conn.close();
+}
+}
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index d0a49cc..dbfc607 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -64,6 +64,7 @@ import 
org.apache.phoenix.schema.ColumnFamilyNotFoundException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.ColumnRef;
 import org.apache.phoenix.schema.FunctionNotFoundException;
+import org.apache.phoenix.schema.IndexNotFoundException;
 import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.MetaDataEntityNotFoundException;
 import org.apache.phoenix.schema.PColumn;
@@ -265,6 +266,15 @@ public class FromCompiler {
 return visitor;
 }
 
+public static ColumnResolver getIndexResolver(SingleTableStatement 
statement,
+  PhoenixConnection connection) throws 
SQLException {
+try {
+return getResolver(statement, connection);
+} catch (TableNotFoundException e) {
+throw new IndexNotFoundException(e.getSchemaName(), 
e.getTableName(), e.getTimeStamp());
+}
+}
+
 public static ColumnResolver getResolver(SingleTableStatement statement, 
PhoenixConnection connection, Map udfParseNodes)
 throws SQLException {
 SingleTableColumnResolver visitor = new 
SingleTableColumnResolver(connection, 

[phoenix] 28/34: PHOENIX-5207 Create index if not exists fails incorrectly if table has 'maxIndexesPerTable' indexes already

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit fabe30d78b297e05b245e48edebdd7c731bb68e2
Author: Abhishek Singh Chouhan 
AuthorDate: Thu Mar 28 02:00:40 2019 +

PHOENIX-5207 Create index if not exists fails incorrectly if table has 'maxIndexesPerTable' indexes already
---
 .../apache/phoenix/end2end/index/BaseIndexIT.java  | 37 ++
 1 file changed, 37 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
index cc9a4ab..b5b2d1e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
@@ -42,6 +42,7 @@ import java.util.List;
 import java.util.Properties;
 import java.util.Random;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
@@ -68,6 +69,7 @@ import org.apache.phoenix.parse.NamedTableNode;
 import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
@@ -1302,5 +1304,40 @@ public abstract class BaseIndexIT extends 
ParallelStatsDisabledIT {
 }
 }
 
+@Test
+public void testMaxIndexesPerTable() throws SQLException {
+String tableName = "TBL_" + generateUniqueName();
+String indexName = "IND_" + generateUniqueName();
+String fullTableName = 
SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+Configuration conf =
+
conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
+int maxIndexes =
+conf.getInt(QueryServices.MAX_INDEXES_PER_TABLE,
+QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE);
+conn.createStatement()
+.execute("CREATE TABLE " + fullTableName
++ " (k VARCHAR NOT NULL PRIMARY KEY, \"V1\" 
VARCHAR, \"v2\" VARCHAR)"
++ tableDDLOptions);
+for (int i = 0; i < maxIndexes; i++) {
+conn.createStatement().execute("CREATE " + (localIndex ? 
"LOCAL " : "") + "INDEX "
++ indexName + i + " ON " + fullTableName + "(\"v2\") 
INCLUDE (\"V1\")");
+}
+try {
+conn.createStatement()
+.execute("CREATE " + (localIndex ? "LOCAL " : "") + 
"INDEX " + indexName
++ maxIndexes + " ON " + fullTableName
++ "(\"v2\") INCLUDE (\"V1\")");
+fail("Expected exception TOO_MANY_INDEXES");
+} catch (SQLException e) {
+assertEquals(e.getErrorCode(), 
SQLExceptionCode.TOO_MANY_INDEXES.getErrorCode());
+}
+conn.createStatement()
+.execute("CREATE " + (localIndex ? "LOCAL " : "") + "INDEX 
IF NOT EXISTS "
++ indexName + "0" + " ON " + fullTableName
++ "(\"v2\") INCLUDE (\"V1\")");
+}
+}
 
 }
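
The test pins down the IF NOT EXISTS contract at the cap: once a table carries QueryServices.MAX_INDEXES_PER_TABLE indexes, a fresh CREATE INDEX fails with TOO_MANY_INDEXES, but re-stating an existing index with IF NOT EXISTS must succeed as a silent no-op. A hedged, condensed sketch (URL and names illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class CreateIndexIfNotExistsSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.createStatement().execute(
                    "CREATE TABLE T (K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR)");
            conn.createStatement().execute("CREATE INDEX IDX0 ON T (V2) INCLUDE (V1)");
            // ...assume more indexes are created here until the cap is reached...
            // Before the fix this threw even though IDX0 already exists; now it
            // is the no-op that IF NOT EXISTS promises.
            conn.createStatement().execute(
                    "CREATE INDEX IF NOT EXISTS IDX0 ON T (V2) INCLUDE (V1)");
        }
    }
}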



[phoenix] 29/34: PHOENIX-5219 Fix ConcurrentMutationsIT testLockUntilMVCCAdvanced and testRowLockDuringPreBatchMutateWhenIndexed failures on the master branch

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit fe6e767f62c017a48da6fa16a6494c3f4a3b7f72
Author: Thomas D'Silva 
AuthorDate: Thu Mar 28 23:36:30 2019 +

PHOENIX-5219 Fix ConcurrentMutationsIT testLockUntilMVCCAdvanced and testRowLockDuringPreBatchMutateWhenIndexed failures on the master branch
---
 .../src/main/java/org/apache/phoenix/hbase/index/Indexer.java| 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 8c5184a..c34ffd2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -33,6 +33,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
@@ -424,7 +425,6 @@ public class Indexer implements RegionObserver, 
RegionCoprocessor {
   ReplayWrite replayWrite = this.builder.getReplayWrite(firstMutation);
   boolean resetTimeStamp = replayWrite == null;
   long now = EnvironmentEdgeManager.currentTimeMillis();
-  byte[] byteNow = Bytes.toBytes(now);
   for (int i = 0; i < miniBatchOp.size(); i++) {
   Mutation m = miniBatchOp.getOperation(i);
   // skip this mutation if we aren't enabling indexing
@@ -436,10 +436,9 @@ public class Indexer implements RegionObserver, 
RegionCoprocessor {
   // Unless we're replaying edits to rebuild the index, we 
update the time stamp
   // of the data table to prevent overlapping time stamps 
(which prevents index
   // inconsistencies as this case isn't handled correctly 
currently).
-  for (List<Cell> family : m.getFamilyCellMap().values()) {
-  List<KeyValue> familyKVs = KeyValueUtil.ensureKeyValues(family);
-  for (KeyValue kv : familyKVs) {
-  setTimeStamp(kv, byteNow);
+  for (List<Cell> cells : m.getFamilyCellMap().values()) {
+  for (Cell cell : cells) {
+  CellUtil.setTimestamp(cell, now);
   }
   }
   }
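
The rewrite leans on HBase 2.x CellUtil.setTimestamp mutating cells in place, which removes the KeyValue conversion and the Bytes.toBytes(now) buffer entirely. A hedged standalone sketch of the same loop over an ordinary Put, assuming hbase-client 2.x on the classpath:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class TimestampRewriteSketch {
    public static void main(String[] args) throws IOException {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        long now = System.currentTimeMillis();
        // Stamp every cell of the mutation with "now", mirroring the rewritten
        // loop in the diff above -- no KeyValue copies required.
        for (List<Cell> cells : put.getFamilyCellMap().values()) {
            for (Cell cell : cells) {
                CellUtil.setTimestamp(cell, now);
            }
        }
        System.out.println(put); // cells now carry the rewritten timestamp
    }
}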



[phoenix] 31/34: PHOENIX-5169 Query logger is still initialized for each query if the log level is off(addendum)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 8a5879d7d56ae892a649b5b3cd700d346f2681f5
Author: jaanai 
AuthorDate: Sat Mar 30 08:41:44 2019 +

PHOENIX-5169 Query logger is still initialized for each query if the log level is off (addendum)
---
 phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java | 3 ---
 1 file changed, 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
index b740d18..208eddd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
@@ -51,7 +51,6 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixResultSet;
-import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.log.LogLevel;
 import org.apache.phoenix.log.QueryLogger;
 import org.apache.phoenix.log.QueryStatus;
@@ -65,8 +64,6 @@ import org.junit.Test;
 
 import com.google.common.collect.Maps;
 
-import javax.validation.constraints.AssertFalse;
-import javax.validation.constraints.AssertTrue;
 
 public class QueryLoggerIT extends BaseUniqueNamesOwnClusterIT {
 



[phoenix] 18/34: PHOENIX-5185 support Math PI function (#461)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit a26f6fc4ca6f60bccfd94733b3057efdfeae35fa
Author: Xinyi Yan 
AuthorDate: Thu Mar 14 17:45:45 2019 +

PHOENIX-5185 support Math PI function (#461)
---
 .../phoenix/end2end/LnLogFunctionEnd2EndIT.java| 16 --
 .../phoenix/end2end/MathPIFunctionEnd2EndIT.java   | 61 
 .../phoenix/end2end/PowerFunctionEnd2EndIT.java| 16 --
 .../apache/phoenix/expression/ExpressionType.java  |  1 +
 .../expression/function/MathPIFunction.java| 65 ++
 .../apache/phoenix/expression/ExpFunctionTest.java | 19 +--
 .../phoenix/expression/LnLogFunctionTest.java  | 23 ++--
 .../phoenix/expression/MathPIFunctionTest.java | 44 +++
 .../phoenix/expression/PowerFunctionTest.java  | 22 ++--
 .../phoenix/expression/SqrtFunctionTest.java   | 20 +--
 .../java/org/apache/phoenix/query/BaseTest.java| 17 +-
 11 files changed, 199 insertions(+), 105 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LnLogFunctionEnd2EndIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LnLogFunctionEnd2EndIT.java
index ddbe2ad..d3d1b51 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LnLogFunctionEnd2EndIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LnLogFunctionEnd2EndIT.java
@@ -36,25 +36,9 @@ import org.junit.Test;
 public class LnLogFunctionEnd2EndIT extends ParallelStatsDisabledIT {
 
 private static final String KEY = "key";
-private static final double ZERO = 1e-9;
 private String signedTableName;
 private String unsignedTableName;
 
-private static boolean twoDoubleEquals(double a, double b) {
-if (Double.isNaN(a) ^ Double.isNaN(b)) return false;
-if (Double.isNaN(a)) return true;
-if (Double.isInfinite(a) ^ Double.isInfinite(b)) return false;
-if (Double.isInfinite(a)) {
-if ((a > 0) ^ (b > 0)) return false;
-else return true;
-}
-if (Math.abs(a - b) <= ZERO) {
-return true;
-} else {
-return false;
-}
-}
-
 @Before
 public void initTable() throws Exception {
 Connection conn = null;
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MathPIFunctionEnd2EndIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MathPIFunctionEnd2EndIT.java
new file mode 100644
index 000..9594aec
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MathPIFunctionEnd2EndIT.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.*;
+
+import java.sql.*;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.expression.function.MathPIFunction;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * End to end tests for {@link MathPIFunction}
+ */
+public class MathPIFunctionEnd2EndIT extends ParallelStatsDisabledIT {
+
+@Test
+public void testGetMathPIValue() throws Exception {
+Connection conn  = DriverManager.getConnection(getUrl());
+ResultSet rs = conn.createStatement().executeQuery("SELECT PI()");
+assertTrue(rs.next());
+assertTrue(twoDoubleEquals(rs.getDouble(1), Math.PI));
+assertFalse(rs.next());
+}
+
+@Test
+public void testMathPIRoundTwoDecimal() throws Exception {
+Connection conn  = DriverManager.getConnection(getUrl());
+ResultSet rs = conn.createStatement().executeQuery("SELECT ROUND(PI(), 
2)");
+assertTrue(rs.next());
+assertTrue(twoDoubleEquals(rs.getDouble(1), 3.14));
+assertFalse(rs.next());
+}
+
+@Test
+public void testMathPIFunctionWithIncorrectFormat() throws Exception {
+Connection conn  = DriverManager.getConnection(getUrl());
+try {
+conn.createStatement().executeQuery("SELECT PI(1)&

[phoenix] 19/34: PHOENIX-5131 Make spilling to disk for order/group by configurable

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit b6ce5c0c045afac00ce4af7e953516e74a8fe12f
Author: Abhishek Singh Chouhan 
AuthorDate: Thu Mar 14 00:34:37 2019 +

PHOENIX-5131 Make spilling to disk for order/group by configurable

Signed-off-by: Chinmay Kulkarni 
---
 .../java/org/apache/phoenix/end2end/OrderByIT.java |  45 +++
 ...OrderByWithServerClientSpoolingDisabledIT.java} |  17 ++-
 .../end2end/OrderByWithServerMemoryLimitIT.java|  81 
 .../phoenix/end2end/OrderByWithSpillingIT.java |   3 +-
 .../phoenix/end2end/SpooledTmpFileDeleteIT.java|   2 +-
 .../end2end/join/SortMergeJoinNoSpoolingIT.java|  83 +
 .../phoenix/coprocessor/MetaDataProtocol.java  |   7 ++
 .../phoenix/coprocessor/ScanRegionObserver.java|   4 +-
 .../org/apache/phoenix/execute/AggregatePlan.java  |  28 -
 .../phoenix/execute/ClientAggregatePlan.java   |  30 -
 .../org/apache/phoenix/execute/ClientScanPlan.java |  16 ++-
 .../java/org/apache/phoenix/execute/ScanPlan.java  |  10 +-
 .../apache/phoenix/execute/SortMergeJoinPlan.java  | 138 +
 .../phoenix/hbase/index/util/VersionUtil.java  |  12 ++
 .../org/apache/phoenix/iterate/BufferedQueue.java  |  20 +--
 .../phoenix/iterate/BufferedSortedQueue.java   |  33 +
 .../apache/phoenix/iterate/BufferedTupleQueue.java | 134 
 .../iterate/NonAggregateRegionScannerFactory.java  |  45 +--
 .../iterate/OrderedAggregatingResultIterator.java  |   5 +-
 .../phoenix/iterate/OrderedResultIterator.java |  72 +--
 .../org/apache/phoenix/iterate/PhoenixQueues.java  |  96 ++
 .../apache/phoenix/iterate/SizeAwareQueue.java}|  19 +--
 .../org/apache/phoenix/iterate/SizeBoundQueue.java |  96 ++
 .../phoenix/iterate/SpoolingResultIterator.java|   5 +-
 .../org/apache/phoenix/query/QueryServices.java|  11 +-
 .../apache/phoenix/query/QueryServicesOptions.java |  19 ++-
 .../phoenix/iterate/OrderedResultIteratorTest.java |  55 +++-
 .../phoenix/query/QueryServicesTestImpl.java   |   3 +-
 .../org/apache/phoenix/util/MetaDataUtilTest.java  |  10 +-
 29 files changed, 880 insertions(+), 219 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
index 792d08f..172ed89 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
@@ -20,7 +20,10 @@ package org.apache.phoenix.end2end;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.hamcrest.CoreMatchers.containsString;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -30,6 +33,8 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Properties;
 
+import org.apache.phoenix.exception.PhoenixIOException;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
 
@@ -461,4 +466,44 @@ public class OrderByIT extends BaseOrderByIT {
 conn.close();
 }
 }
+
+@Test
+public void testOrderByWithClientMemoryLimit() throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+props.put(QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, 
Integer.toString(1));
+props.put(QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB,
+Boolean.toString(Boolean.FALSE));
+
+try(Connection conn = DriverManager.getConnection(getUrl(), props)) {
+conn.setAutoCommit(false);
+String tableName = generateUniqueName();
+String ddl =
+"CREATE TABLE " + tableName + "  (a_string varchar not 
null, col1 integer"
++ "  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+createTestTable(getUrl(), ddl);
+
+String dml = "UPSERT INTO " + tableName + " VALUES(?, ?)";
+PreparedStatement stmt = conn.prepareStatement(dml);
+stmt.setString(1, "a");
+stmt.setInt(2, 40);
+stmt.execute();
+stmt.setString(1, "b");
+stmt.setInt(2, 20);
+stmt.execute();
+stmt.setString(1, "c");
+stmt.setInt(2, 30);
+stmt.execute();
+conn.commit();
+
+String query = "select count(*), col1 from " + tableName + " group 
by col1 order 
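
The two client settings exercised here are the heart of the change: a client spool threshold plus an on/off switch for order-by spooling. A hedged sketch of wiring them into a connection; the URL and table are illustrative, while the constants are exactly the ones the test uses:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.util.Properties;

import org.apache.phoenix.query.QueryServices;

public class OrderBySpoolingSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // A 1-byte threshold plus disabled spooling forces the size-bound
        // in-memory queue, so an oversized ORDER BY fails fast instead of
        // silently spilling to a tmp file.
        props.setProperty(QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB, "1");
        props.setProperty(QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB, "false");
        try (Connection conn =
                DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
            ResultSet rs = conn.createStatement()
                    .executeQuery("SELECT * FROM T ORDER BY V");
            while (rs.next()) {
                // the queue overflow surfaces here, wrapped as a PhoenixIOException
            }
        }
    }
}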

[phoenix] 03/34: PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory to get the config in PhoenixRDD (addendum)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit d066f400f8fab830597a1b5a37d1d647705aa70f
Author: Thomas D'Silva 
AuthorDate: Fri Mar 1 00:52:36 2019 +

PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory to get the config in PhoenixRDD (addendum)
---
 .../main/scala/org/apache/phoenix/spark/ConfigurationUtil.scala   | 7 +--
 .../main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala  | 8 
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ConfigurationUtil.scala 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ConfigurationUtil.scala
index d555954..9377986 100644
--- 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ConfigurationUtil.scala
+++ 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/ConfigurationUtil.scala
@@ -17,6 +17,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hbase.{HBaseConfiguration, HConstants}
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver
 import org.apache.phoenix.mapreduce.util.{ColumnInfoToStringEncoderDecoder, 
PhoenixConfigurationUtil}
+import org.apache.phoenix.query.HBaseFactoryProvider
 import org.apache.phoenix.util.{ColumnInfo, PhoenixRuntime}
 
 import scala.collection.JavaConversions._
@@ -28,8 +29,8 @@ object ConfigurationUtil extends Serializable {
 
 // Create an HBaseConfiguration object from the passed in config, if 
present
 val config = conf match {
-  case Some(c) => HBaseConfiguration.create(c)
-  case _ => HBaseConfiguration.create()
+  case Some(c) => 
HBaseFactoryProvider.getConfigurationFactory.getConfiguration(c)
+  case _ => HBaseFactoryProvider.getConfigurationFactory.getConfiguration()
 }
 
 // Set the tenantId in the config if present
@@ -41,6 +42,8 @@ object ConfigurationUtil extends Serializable {
 // Set the table to save to
 PhoenixConfigurationUtil.setOutputTableName(config, tableName)
 PhoenixConfigurationUtil.setPhysicalTableName(config, tableName)
+// disable property provider evaluation
+PhoenixConfigurationUtil.setPropertyPolicyProviderDisabled(config);
 
 // Infer column names from the DataFrame schema
 PhoenixConfigurationUtil.setUpsertColumnNames(config, Array(columns : _*))
diff --git 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
index 3b0289d..85a6d8a 100644
--- 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
+++ 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
@@ -28,7 +28,7 @@ class DataFrameFunctions(data: DataFrame) extends 
Serializable {
saveToPhoenix(parameters("table"), zkUrl = 
parameters.get("zkUrl"), tenantId = parameters.get("TenantId"), 

skipNormalizingIdentifier=parameters.contains("skipNormalizingIdentifier"))
}
-  def saveToPhoenix(tableName: String, conf: Configuration = new Configuration,
+  def saveToPhoenix(tableName: String, conf: Option[Configuration] = None,
 zkUrl: Option[String] = None, tenantId: Option[String] = 
None, skipNormalizingIdentifier: Boolean = false): Unit = {
 
 // Retrieve the schema field names and normalize to Phoenix, need to do 
this outside of mapPartitions
@@ -36,7 +36,7 @@ class DataFrameFunctions(data: DataFrame) extends 
Serializable {
 
 
 // Create a configuration object to use for saving
-@transient val outConfig = 
ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrl, 
tenantId, Some(conf))
+@transient val outConfig = 
ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrl, 
tenantId, conf)
 
 // Retrieve the zookeeper URL
 val zkUrlFinal = ConfigurationUtil.getZookeeperURL(outConfig)
@@ -45,9 +45,9 @@ class DataFrameFunctions(data: DataFrame) extends 
Serializable {
 val phxRDD = data.rdd.mapPartitions{ rows =>
  
// Create a within-partition config to retrieve the ColumnInfo list
-   @transient val partitionConfig = 
ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrlFinal, 
tenantId)
+   @transient val partitionConfig = ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrlFinal, tenantId)
@transient val columns = 
PhoenixConfigurationUtil.getUpsertColumnMetadataList(partitionConfig).toList
- 
+
rows.map { row =>
  val rec = new PhoenixRecordWritable(columns)
  row.toSeq.foreach { e => rec.add(e) }



[phoenix] 16/34: PHOENIX-5188 IndexedKeyValue should populate KeyValue fields

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit ae45a00616f0efc8be7828fe0d833014f9d7c1cc
Author: Geoffrey Jacoby 
AuthorDate: Tue Mar 12 18:17:50 2019 +

PHOENIX-5188 IndexedKeyValue should populate KeyValue fields
---
 .../phoenix/hbase/index/wal/IndexedKeyValue.java   | 25 
 .../regionserver/wal/IndexedKeyValueTest.java  | 66 ++
 2 files changed, 91 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
index f2b3b98..025dcc8 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
@@ -53,6 +53,7 @@ public class IndexedKeyValue extends KeyValue {
 public IndexedKeyValue() {}
 
 public IndexedKeyValue(byte[] bs, Mutation mutation) {
+super(mutation.getRow(), 0, mutation.getRow().length);
 this.indexTableName = new ImmutableBytesPtr(bs);
 this.mutation = mutation;
 this.hashCode = calcHashCode(indexTableName, mutation);
@@ -109,6 +110,24 @@ public class IndexedKeyValue extends KeyValue {
 }
 
 @Override
+public int getRowOffset() {
+return this.offset;
+}
+
+@Override
+public short getRowLength() {
+return (short) this.length;
+}
+
+@Override
+public int getKeyLength(){
+//normally the key is row key + other key fields such as timestamp,
+// but those aren't defined here because a Mutation can contain 
multiple,
+// so we just return the length of the row key
+return this.length;
+}
+
+@Override
 public String toString() {
 return "IndexWrite:\n\ttable: " + indexTableName + "\n\tmutation:" + 
mutation;
 }
@@ -171,6 +190,12 @@ public class IndexedKeyValue extends KeyValue {
 MutationProto mProto = MutationProto.parseFrom(mutationData);
 this.mutation = 
org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto);
 this.hashCode = calcHashCode(indexTableName, mutation);
+if (mutation != null){
+bytes = mutation.getRow();
+offset = 0;
+length = bytes.length;
+}
+
 }
 
 public boolean getBatchFinished() {
diff --git 
a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java
 
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java
new file mode 100644
index 000..009c323
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
+import org.apache.phoenix.hbase.index.wal.KeyValueCodec;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+
+public class IndexedKeyValueTest {
+
+@Test
+public void testIndexedKeyValuePopulatesKVFields() throws Exception {
+byte[] row = Bytes.toBytes("foo");
+byte[] tableNameBytes = Bytes.toBytes("MyTableName");
+Mutation mutation = new Put(row);
+IndexedKeyValue indexedKeyValue = new IndexedKeyValue(tableNameBytes, 
mutation);
+testIndexedKeyValueHelper(indexedKeyValue, row, tableNameBytes, 
mutation);
+
+//now serialize the IndexedKeyValue and make sure the deserialized 
copy also
+//has all the right fields
+ByteArrayOutputStream baos = new ByteArrayOutputStream();
+DataOutputStream out = 
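
The message is cut off above before the helper's definition. A plausible
reconstruction, for illustration only (the actual helper may differ): it
asserts the row fields that this commit now populates from the Mutation.

    // Hypothetical sketch of the truncated helper; getRowArray/getRowOffset/
    // getRowLength are the Cell accessors this commit makes meaningful.
    private void testIndexedKeyValueHelper(IndexedKeyValue indexedKeyValue, byte[] row,
            byte[] tableNameBytes, Mutation mutation) {
        Assert.assertEquals(0, indexedKeyValue.getRowOffset());
        Assert.assertEquals(row.length, indexedKeyValue.getRowLength());
        Assert.assertArrayEquals(row, Bytes.copy(indexedKeyValue.getRowArray(),
                indexedKeyValue.getRowOffset(), indexedKeyValue.getRowLength()));
    }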

[phoenix] 10/34: PHOENIX-4929 IndexOutOfBoundsException when casting timestamp to date

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 4cb9269404d6ccbee440380d6e58db783b19b552
Author: Xinyi Yan 
AuthorDate: Tue Mar 5 06:24:36 2019 +

PHOENIX-4929 IndexOutOfBoundsException when casting timestamp to date
---
 .../org/apache/phoenix/end2end/DateTimeIT.java | 28 ++
 .../apache/phoenix/compile/ExpressionCompiler.java | 13 ++
 .../apache/phoenix/compile/QueryCompilerTest.java  |  7 ++
 3 files changed, 44 insertions(+), 4 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
index eecc540..df94a70 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -51,6 +51,7 @@ import java.sql.Time;
 import java.sql.Timestamp;
 import java.sql.Types;
 import java.text.Format;
+import java.time.LocalDate;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
 import java.util.Properties;
@@ -1823,6 +1824,33 @@ public class DateTimeIT extends ParallelStatsDisabledIT {
 }
 }
 
+@Test
+public void testCastTimeStampToDate() throws Exception {
+String tablename = generateUniqueName();
+String ddl = "CREATE TABLE IF NOT EXISTS " + tablename +
+" (PK INTEGER PRIMARY KEY, A_TIMESTAMP TIMESTAMP)";
+Properties props = new Properties();
+props.setProperty("phoenix.query.dateFormatTimeZone", 
TimeZone.getDefault().toString());
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.createStatement().execute(ddl);
+
+String localTime = LocalDate.now().toString();
+conn.createStatement().execute("UPSERT INTO " + tablename +
+" VALUES(1,TO_TIMESTAMP('"+ localTime + "'))");
+
+conn.setAutoCommit(true);
+try {
+PreparedStatement statement =
+conn.prepareStatement("SELECT CAST(A_TIMESTAMP AS DATE) as 
A_DATE FROM " + tablename);
+
+ResultSet rs = statement.executeQuery();
+assertTrue (rs.next());
+assertTrue (rs.getString(1).contains(localTime));
+assertFalse (rs.next());
+} finally {
+conn.close();
+}
+}
 
 @Test
 public void testTimestamp() throws Exception {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 077e1af..980c1b9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -586,11 +586,16 @@ public class ExpressionCompiler extends 
UnsupportedAllParseNodeVisitor binds = Collections.emptyList();
 compileQuery(query, binds);
 }
+
+@Test
+public void testCastingTimestampToDateInSelect() throws Exception {
+String query = "SELECT CAST (a_timestamp AS DATE) FROM aTable";
+List binds = Collections.emptyList();
+compileQuery(query, binds);
+}
 
 @Test
 public void testCastingStringToDecimalInSelect() throws Exception {



[phoenix] 13/34: PHOENIX-5182: Deprecate getTotalSum API of the GlobalMetric interface

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 3b529025478c2fe06181f724fb96ea69cdc4f9b9
Author: Chinmay Kulkarni 
AuthorDate: Fri Mar 8 00:43:53 2019 +

PHOENIX-5182: Deprecate getTotalSum API of the GlobalMetric interface
---
 .../apache/phoenix/execute/PartialCommitIT.java|   3 +-
 .../monitoring/PhoenixMetricsDisabledIT.java   |   2 +-
 .../phoenix/monitoring/PhoenixMetricsIT.java   | 100 ++---
 .../apache/phoenix/monitoring/GlobalMetric.java|   1 +
 4 files changed, 53 insertions(+), 53 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
index 2b0c8b9..27f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/PartialCommitIT.java
@@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.hbase.index.Indexer;
-import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.monitoring.GlobalMetric;
 import org.apache.phoenix.monitoring.MetricType;
@@ -249,7 +248,7 @@ public class PartialCommitIT extends 
BaseUniqueNamesOwnClusterIT {
 assertArrayEquals(expectedUncommittedStatementIndexes, 
uncommittedStatementIndexes);
 Map> mutationWriteMetrics = 
PhoenixRuntime.getWriteMetricInfoForMutationsSinceLastReset(con);
 assertEquals(expectedUncommittedStatementIndexes.length, 
mutationWriteMetrics.get(bFailureTable).get(MUTATION_BATCH_FAILED_SIZE).intValue());
-assertEquals(expectedUncommittedStatementIndexes.length, 
GLOBAL_MUTATION_BATCH_FAILED_COUNT.getMetric().getTotalSum());
+assertEquals(expectedUncommittedStatementIndexes.length, 
GLOBAL_MUTATION_BATCH_FAILED_COUNT.getMetric().getValue());
 }
 
 
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsDisabledIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsDisabledIT.java
index 85cf1a3..1efbc46 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsDisabledIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsDisabledIT.java
@@ -72,7 +72,7 @@ public class PhoenixMetricsDisabledIT extends 
BaseUniqueNamesOwnClusterIT {
 public void testResetGlobalPhoenixMetrics() {
 for (GlobalMetric m : PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
 assertThat(m, 
CoreMatchers.instanceOf(NoOpGlobalMetricImpl.class));
-assertEquals(NO_VALUE, m.getTotalSum());
+assertEquals(NO_VALUE, m.getValue());
 assertEquals(NO_SAMPLES, m.getNumberOfSamples());
 }
 }
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 0764ff7..e00fab3 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -96,7 +96,7 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
 resetGlobalMetrics();
 for (GlobalMetric m : PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
 assertThat(m, 
CoreMatchers.instanceOf(GlobalMetricImpl.class));
-assertEquals(0, m.getTotalSum());
+assertEquals(0, m.getValue());
 assertEquals(0, m.getNumberOfSamples());
 }
 assertTrue(verifyMetricsFromSink());
@@ -114,25 +114,25 @@ public class PhoenixMetricsIT extends 
BasePhoenixMetricsIT {
 rs.getString(1);
 rs.getString(2);
 }
-assertEquals(1, GLOBAL_NUM_PARALLEL_SCANS.getMetric().getTotalSum());
-assertEquals(1, GLOBAL_SELECT_SQL_COUNTER.getMetric().getTotalSum());
-assertEquals(0, 
GLOBAL_REJECTED_TASK_COUNTER.getMetric().getTotalSum());
-assertEquals(0, 
GLOBAL_QUERY_TIMEOUT_COUNTER.getMetric().getTotalSum());
-assertEquals(0, GLOBAL_FAILED_QUERY_COUNTER.getMetric().getTotalSum());
-assertEquals(0, GLOBAL_SPOOL_FILE_COUNTER.getMetric().getTotalSum());
-assertEquals(0, GLOBAL_MUTATION_BATCH_SIZE.getMetric().getTotalSum());
-assertEquals(0, GLOBAL_MUTATION_BYTES.getMetric().getTotalSum());
-assertEquals(0, 
GLOBAL_MUTATION_BATCH_FAILED_COUNT.getMetric().getTotalSum());
-
-assertTrue(GLOBAL_SCAN_BYTES.getMetric().getTotalSum() > 0);
-asse
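
The email is truncated above. The migration itself is mechanical; a sketch of
client code on the new API, using only methods shown in this diff (the wrapper
class is illustrative):

    import org.apache.phoenix.monitoring.GlobalMetric;
    import org.apache.phoenix.util.PhoenixRuntime;

    class MetricsMigration {
        static void report() {
            for (GlobalMetric m : PhoenixRuntime.getGlobalPhoenixClientMetrics()) {
                // old: m.getTotalSum() -- deprecated by this commit
                long value = m.getValue();
                long samples = m.getNumberOfSamples();
                System.out.println(value + " (" + samples + " samples)");
            }
        }
    }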

[phoenix] 14/34: PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 146897938e08967f1f59360eac0445bff4d4266f
Author: Ankit Singhal 
AuthorDate: Tue Mar 12 00:09:36 2019 +

PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server
---
 .../phoenix/end2end/SystemTablePermissionsIT.java  | 43 ++
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  2 +-
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
index 0788ed7..6da970b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemTablePermissionsIT.java
@@ -17,12 +17,23 @@
 package org.apache.phoenix.end2end;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.security.PrivilegedExceptionAction;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
 import java.util.Collections;
+import java.util.Properties;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.NewerSchemaAlreadyExistsException;
+import org.apache.phoenix.schema.NewerTableAlreadyExistsException;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -84,5 +95,37 @@ public class SystemTablePermissionsIT extends 
BasePermissionsIT {
 
 // Make sure that the unprivileged user can now read the table
 verifyAllowed(readTable(TABLE_NAME), regularUser1);
+//This verification is added to test PHOENIX-5178
+superUser1.runAs(new PrivilegedExceptionAction() {
+@Override public Void run() throws Exception {
+try {
+if (isNamespaceMapped) {
+grantPermissions(regularUser1.getShortName(),"SYSTEM", 
Action.ADMIN);
+}
+return null;
+} catch (Throwable e) {
+throw new Exception(e);
+}
+
+}
+});
+if(isNamespaceMapped) {
+verifyAllowed(new AccessTestAction() {
+@Override public Object run() throws Exception {
+Properties props = new Properties();
+
props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, 
Boolean.toString(isNamespaceMapped));
+props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
+//Impersonate meta connection
+try (Connection metaConnection = 
DriverManager.getConnection(getUrl(), props);
+Statement stmt = metaConnection.createStatement()) {
+stmt.executeUpdate("CREATE SCHEMA IF NOT EXISTS 
SYSTEM");
+}catch(NewerSchemaAlreadyExistsException e){
+
+}
+return null;
+}
+}, regularUser1);
+}
 }
+
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 0b95b26..06d36d9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1088,7 +1088,7 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, 
false));
 }
 Scan scan = new Scan();
-scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
 ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
 scanRanges.initializeScan(scan);
 scan.setFilter(scanRanges.getSkipScanFilter());
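
The one-character change above hinges on HBase semantics: Scan.setTimeRange(min,
max) is half-open, [min, max), so a SYSTEM row written exactly at
clientTimeStamp could be silently excluded from the metadata lookup. A tiny
sketch of the rule (class and method names illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Scan;

    class TimeRangeRule {
        // A cell with timestamp ts is returned only when maxStamp > ts,
        // hence "+ 1" to make a concrete client timestamp inclusive.
        static Scan upToInclusive(long ts) throws IOException {
            return new Scan().setTimeRange(0L, ts + 1);
        }
    }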



[phoenix] 02/34: PHOENIX-374 Enable access to dynamic columns in * or cf.* selection (Addendum)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit aef34e6a1b4e2dda28a0cd9c03249446ff530e9f
Author: Chinmay Kulkarni 
AuthorDate: Thu Feb 28 23:47:42 2019 +

PHOENIX-374 Enable access to dynamic columns in * or cf.* selection 
(Addendum)
---
 phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index a7936e0..cd961da 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -1291,7 +1291,9 @@ public class PTableImpl implements PTable {
 }
 String fam = Bytes.toString(family);
 if (column.isDynamic()) {
-this.colFamToDynamicColumnsMapping.putIfAbsent(fam, new 
ArrayList<>());
+if (!this.colFamToDynamicColumnsMapping.containsKey(fam)) {
+this.colFamToDynamicColumnsMapping.put(fam, new 
ArrayList<>());
+}
 this.colFamToDynamicColumnsMapping.get(fam).add(column);
 }
 }
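
A note on the addendum above: Map.putIfAbsent is a Java 8 default method, so
replacing it keeps the file compiling on branches still built against Java 7
(a plausible motive; the commit message does not state one). The explicit form
behaves identically on this single-threaded path:

    // Equivalent of map.putIfAbsent(fam, new ArrayList<>()) without the
    // Java 8 default method (fragment; map and fam as in the diff above).
    if (!map.containsKey(fam)) {
        map.put(fam, new ArrayList<>());
    }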



[phoenix] 04/34: PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory (addendum)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit c9447c68bfc9be751d7ccffa880a8e981624b916
Author: Thomas D'Silva 
AuthorDate: Fri Mar 1 01:14:29 2019 +

PHOENIX-5141 Use HBaseFactoryProvider.getConfigurationFactory (addendum)
---
 .../src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
index 85a6d8a..ac3993a 100644
--- 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
+++ 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/DataFrameFunctions.scala
@@ -45,7 +45,7 @@ class DataFrameFunctions(data: DataFrame) extends 
Serializable {
 val phxRDD = data.rdd.mapPartitions{ rows =>
  
// Create a within-partition config to retrieve the ColumnInfo list
-   @transient val partitionConfig = ConfigurationUtil.getOutputCon 
figuration(tableName, fieldArray, zkUrlFinal, tenantId)
+   @transient val partitionConfig = 
ConfigurationUtil.getOutputConfiguration(tableName, fieldArray, zkUrlFinal, 
tenantId)
@transient val columns = 
PhoenixConfigurationUtil.getUpsertColumnMetadataList(partitionConfig).toList
 
rows.map { row =>



[phoenix] 26/34: PHOENIX-5180 Add API to PhoenixRunTime to get ptable of a tenant using a global connection

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 4a7cc097d82ed7d6f12d22e3c36b127c5cbc3e39
Author: Abhishek Singh Chouhan 
AuthorDate: Fri Mar 15 22:50:37 2019 +

PHOENIX-5180 Add API to PhoenixRunTime to get ptable of a tenant using a 
global connection
---
 .../end2end/GlobalConnectionTenantTableIT.java | 188 +
 .../org/apache/phoenix/schema/MetaDataClient.java  |   2 +-
 .../org/apache/phoenix/util/PhoenixRuntime.java|  68 +++-
 3 files changed, 252 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GlobalConnectionTenantTableIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GlobalConnectionTenantTableIT.java
new file mode 100644
index 000..d0c890c
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GlobalConnectionTenantTableIT.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+public class GlobalConnectionTenantTableIT extends BaseTest {
+
+private static final String SCHEMA_NAME = "SCHEMA1";
+private static final String TABLE_NAME = generateUniqueName();
+private static final String TENANT_NAME = "TENANT_A";
+private static final String VIEW_NAME = "VIEW1";
+private static final String INDEX_NAME = "INDEX1";
+private static final String VIEW_INDEX_COL = "v2";
+private static final String FULL_VIEW_NAME = 
SchemaUtil.getTableName(SCHEMA_NAME, VIEW_NAME);
+private static final String FULL_INDEX_NAME = 
SchemaUtil.getTableName(SCHEMA_NAME, INDEX_NAME);
+
+@BeforeClass
+public static void doSetup() throws Exception {
+Map props = Maps.newHashMapWithExpectedSize(1);
+setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+createBaseTable(SCHEMA_NAME, TABLE_NAME, true, null, null);
+try (Connection conn = getTenantConnection(TENANT_NAME)) {
+createView(conn, SCHEMA_NAME, VIEW_NAME, TABLE_NAME);
+createViewIndex(conn, SCHEMA_NAME, INDEX_NAME, VIEW_NAME, 
VIEW_INDEX_COL);
+}
+}
+
+@Test
+public void testGetLatestTenantTable() throws SQLException {
+try (Connection conn = getConnection()) {
+PTable table = PhoenixRuntime.getTable(conn, TENANT_NAME, 
FULL_VIEW_NAME, null);
+assertNotNull(table);
+table = null;
+table = PhoenixRuntime.getTable(conn, TENANT_NAME, 
FULL_INDEX_NAME, null);
+assertNotNull(table);
+}
+}
+
+@Test
+public void testGetTenantViewAtTimestamp() throws SQLException {
+long startTime = EnvironmentEdgeManager.currentTimeMillis();
+try (Connection conn = getConnection()) {
+PTable table = PhoenixRuntime.getTable(conn, TENANT_NAME, 
FULL_VIEW_NAME, null);
+long tableTimestamp = table.getTimeStamp();
+// Alter table
+try (Connection tenantConn = getTenantConnection(TENANT_NAME)) {
+String alterView = "ALTER VIEW " + FULL_VIEW_NAME + " ADD 
new_col INTEGER";
+tenantConn.createStatement().execute(alterView);
+}
+// Get the altered table and verify
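
The email is truncated above. The new API's shape is visible in the test: a
global connection plus an explicit tenant id resolves tenant-owned views and
view indexes, and the trailing Long selects point-in-time metadata. A minimal
caller (wrapper names illustrative):

    import java.sql.Connection;
    import java.sql.SQLException;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.util.PhoenixRuntime;

    class TenantMetadata {
        // null timestamp -> latest metadata; a concrete Long resolves the
        // view as of that timestamp (see testGetTenantViewAtTimestamp).
        static PTable tenantView(Connection globalConn, String tenantId,
                String fullViewName) throws SQLException {
            return PhoenixRuntime.getTable(globalConn, tenantId, fullViewName, null);
        }
    }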

[phoenix] 33/34: PHOENIX-5190 Implement TaskRegionObserver for Index rebuild

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 8cedf240d4fcf3188bc43ef19511d58aaca42119
Author: Gokcen Iskender 
AuthorDate: Wed Mar 6 17:58:21 2019 +

PHOENIX-5190 Implement TaskRegionObserver for Index rebuild

Signed-off-by: Geoffrey Jacoby 
---
 .../phoenix/end2end/DropTableWithViewsIT.java  |  37 ++-
 .../apache/phoenix/end2end/IndexRebuildTaskIT.java | 176 ++
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   3 +-
 .../phoenix/coprocessor/TaskRegionObserver.java| 293 
 .../apache/phoenix/coprocessor/tasks/BaseTask.java |  17 +
 .../coprocessor/tasks/DropChildViewsTask.java  |  81 +
 .../coprocessor/tasks/IndexRebuildTask.java| 151 +
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java  |   5 +
 .../apache/phoenix/mapreduce/index/IndexTool.java  |  58 +++-
 .../index/PhoenixIndexImportDirectReducer.java |  43 +++
 .../phoenix/query/ConnectionQueryServicesImpl.java |  30 +-
 .../org/apache/phoenix/query/QueryConstants.java   |   6 +
 .../java/org/apache/phoenix/schema/PTable.java |  26 +-
 .../java/org/apache/phoenix/schema/task/Task.java  | 369 +
 14 files changed, 1115 insertions(+), 180 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
index a4cd354..6e1f8aa 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DropTableWithViewsIT.java
@@ -18,12 +18,16 @@
 package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Timestamp;
 import java.util.Arrays;
 import java.util.Collection;
 
@@ -103,6 +107,9 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 try (Connection conn = DriverManager.getConnection(getUrl());
 Connection viewConn =
 isMultiTenant ? 
DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn) {
+// Empty the task table first.
+conn.createStatement().execute("DELETE " + " FROM " + 
PhoenixDatabaseMetaData.SYSTEM_TASK_NAME);
+
 String ddlFormat =
 "CREATE TABLE IF NOT EXISTS " + baseTable + "  ("
 + " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 
VARCHAR "
@@ -126,16 +133,14 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 // Run DropChildViewsTask to complete the tasks for dropping child 
views. The depth of the view tree is 2,
 // so we expect that this will be done in two task handling runs 
as each non-root level will be processed
 // in one run
-TaskRegionObserver.DropChildViewsTask task =
-new TaskRegionObserver.DropChildViewsTask(
+TaskRegionObserver.SelfHealingTask task =
+new TaskRegionObserver.SelfHealingTask(
 TaskRegionEnvironment, 
QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);
 task.run();
 task.run();
-ResultSet rs = conn.createStatement().executeQuery("SELECT * " +
-" FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME +
-" WHERE " + PhoenixDatabaseMetaData.TASK_TYPE + " = " +
-PTable.TaskType.DROP_CHILD_VIEWS.getSerializedValue());
-assertFalse(rs.next());
+
+assertTaskColumns(conn, PTable.TaskStatus.COMPLETED.toString(), 
PTable.TaskType.DROP_CHILD_VIEWS, null);
+
 // Views should be dropped by now
 TableName linkTable = 
TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES);
 TableViewFinderResult childViewsResult = new 
TableViewFinderResult();
@@ -147,9 +152,25 @@ public class DropTableWithViewsIT extends 
SplitSystemCatalogIT {
 childViewsResult);
 assertTrue(childViewsResult.getLinks().size() == 0);
 // There should not be any orphan views
-rs = conn.createStatement().executeQuery("SELECT * FROM " + 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME +
+ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM 
" + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME +
  
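
The email is truncated above, and the assertTaskColumns helper it introduces is
not shown. A hypothetical sketch of such a check against SYSTEM.TASK -- the
TASK_STATUS/TASK_DATA column names are assumptions, not taken from this diff:

    // Hypothetical: verify the SYSTEM.TASK row for the given task type
    // reached the expected status; column names assumed.
    private static void assertTaskColumns(Connection conn, String expectedStatus,
            PTable.TaskType taskType, String expectedData) throws SQLException {
        ResultSet rs = conn.createStatement().executeQuery(
                "SELECT TASK_STATUS, TASK_DATA FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME
                        + " WHERE TASK_TYPE = " + taskType.getSerializedValue());
        assertTrue(rs.next());
        assertEquals(expectedStatus, rs.getString(1));
        if (expectedData != null) {
            assertEquals(expectedData, rs.getString(2));
        }
    }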

[phoenix] 23/34: PHOENIX-5184 HBase and Phoenix connection leaks in Indexing code path, OrphanViewTool and PhoenixConfigurationUtil

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 02ec0400a9d7c9fb1bff5fff9d339694b1786195
Author: Chinmay Kulkarni 
AuthorDate: Fri Mar 8 23:31:17 2019 +

PHOENIX-5184 HBase and Phoenix connection leaks in Indexing code path, 
OrphanViewTool and PhoenixConfigurationUtil
---
 .../UngroupedAggregateRegionObserver.java  |  22 +++-
 .../hbase/index/write/RecoveryIndexWriter.java |  30 --
 .../phoenix/mapreduce/AbstractBulkLoadTool.java| 114 +++--
 .../apache/phoenix/mapreduce/OrphanViewTool.java   |  73 -
 .../phoenix/mapreduce/PhoenixRecordWriter.java |  18 +++-
 .../mapreduce/index/DirectHTableWriter.java|  19 +++-
 .../mapreduce/index/IndexScrutinyMapper.java   |  25 -
 .../apache/phoenix/mapreduce/index/IndexTool.java  |  85 +++
 .../index/PhoenixIndexImportDirectMapper.java  |  26 +++--
 .../mapreduce/index/PhoenixIndexImportMapper.java  |  16 +--
 .../index/PhoenixIndexPartialBuildMapper.java  |  25 +++--
 .../mapreduce/util/PhoenixConfigurationUtil.java   |  45 
 12 files changed, 325 insertions(+), 173 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 3be4d36..6b27a88 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -29,6 +29,7 @@ import static 
org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker.CON
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
+import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -54,6 +55,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
@@ -475,13 +477,14 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 byte[] deleteCF = null;
 byte[] emptyCF = null;
 Table targetHTable = null;
+Connection targetHConn = null;
 boolean isPKChanging = false;
 ImmutableBytesWritable ptr = new ImmutableBytesWritable();
 if (upsertSelectTable != null) {
 isUpsert = true;
 projectedTable = deserializeTable(upsertSelectTable);
-targetHTable =
-
ConnectionFactory.createConnection(upsertSelectConfig).getTable(
+targetHConn = 
ConnectionFactory.createConnection(upsertSelectConfig);
+targetHTable = targetHConn.getTable(
 
TableName.valueOf(projectedTable.getPhysicalName().getBytes()));
 selectExpressions = 
deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
 values = new byte[projectedTable.getPKColumns().size()][];
@@ -852,9 +855,8 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 }
 try {
-if (targetHTable != null) {
-targetHTable.close();
-}
+tryClosingResourceSilently(targetHTable);
+tryClosingResourceSilently(targetHConn);
 } finally {
 try {
 innerScanner.close();
@@ -900,6 +902,16 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 
 }
 
+private static void tryClosingResourceSilently(Closeable res) {
+if (res != null) {
+try {
+res.close();
+} catch (IOException e) {
+logger.error("Closing resource: " + res + " failed: ", e);
+}
+}
+}
+
 private void checkForLocalIndexColumnFamilies(Region region,
 List indexMaintainers) throws IOException {
 TableDescriptor tableDesc = region.getTableDescriptor();
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index a1a917c..fefb812 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -26,10 +26,9 @@ 
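
The email is truncated above. The recurring fix in this commit is pairing every
Table obtained from an HBase Connection with a close of that Connection, and
logging (rather than rethrowing) failures during cleanup. Where a resource's
lifetime fits a single method, the same guarantee reads as follows (sketch;
wrapper names illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    class ScopedHBaseWrite {
        static void withTable(Configuration conf, TableName name) throws Exception {
            // try-with-resources closes the Table first, then the Connection,
            // even on error; the coprocessor above creates these conditionally
            // far from where they are released, hence its explicit helpers.
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(name)) {
                // ... write mutations ...
            }
        }
    }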

[phoenix] 15/34: PHOENIX-5178 SYSTEM schema is not getting cached at MetaData server (addendum)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit e612d9759614ee58248b8dbf43329f2a2a4085a7
Author: Ankit Singhal 
AuthorDate: Tue Mar 12 04:54:40 2019 +

PHOENIX-5178 SYSTEM schema is not getting cached at MetaData 
server (addendum)

(cherry picked from commit f969444c96a060a5619e70e543c6d6fb21b32bed)
---
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java  | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 06d36d9..192d004 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1088,7 +1088,12 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements RegionCopr
 keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, 
false));
 }
 Scan scan = new Scan();
-scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
+if (clientTimeStamp != HConstants.LATEST_TIMESTAMP
+&& clientTimeStamp != HConstants.OLDEST_TIMESTAMP) {
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp + 1);
+} else {
+scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
+}
 ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
 scanRanges.initializeScan(scan);
 scan.setFilter(scanRanges.getSkipScanFilter());
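
The guard exists because HConstants.LATEST_TIMESTAMP is Long.MAX_VALUE: for
connections without an explicit SCN, the unconditional "+ 1" from the previous
commit wrapped around. Illustration:

    long clientTimeStamp = HConstants.LATEST_TIMESTAMP; // == Long.MAX_VALUE
    long upperBound = clientTimeStamp + 1;              // overflows to Long.MIN_VALUE
    // setTimeRange(MIN_TABLE_TIMESTAMP, upperBound) is then rejected, since
    // the maximum of the range precedes the minimum.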



[phoenix] 12/34: PHOENIX-5175 Separate client settings for disabling server side mutations for upserts and deletes

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit ee22c65506bc693d011cda1bba2cfcfaf695bf5f
Author: Abhishek Singh Chouhan 
AuthorDate: Tue Mar 5 18:45:21 2019 +

PHOENIX-5175 Separate client settings for disabling server side mutations 
for upserts and deletes
---
 .../java/org/apache/phoenix/end2end/DeleteIT.java  | 32 +++
 .../phoenix/end2end/IndexBuildTimestampIT.java |  3 +-
 .../phoenix/end2end/UpsertSelectAutoCommitIT.java  | 15 +++--
 .../org/apache/phoenix/end2end/UpsertSelectIT.java | 65 ++
 .../org/apache/phoenix/compile/DeleteCompiler.java |  4 +-
 .../org/apache/phoenix/compile/UpsertCompiler.java |  4 +-
 .../org/apache/phoenix/query/QueryServices.java|  3 +-
 .../apache/phoenix/query/QueryServicesOptions.java |  3 +-
 8 files changed, 84 insertions(+), 45 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
index 39210fa..505a5ae 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
@@ -90,7 +90,8 @@ public class DeleteIT extends ParallelStatsDisabledIT {
 
 private void testDeleteFilter(boolean autoCommit) throws Exception {
 Properties props = new Properties();
-props.setProperty(QueryServices.ENABLE_SERVER_SIDE_MUTATIONS, 
allowServerSideMutations);
+props.setProperty(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS,
+allowServerSideMutations);
 Connection conn = DriverManager.getConnection(getUrl(), props);
 String tableName = initTableValues(conn);
 
@@ -119,7 +120,8 @@ public class DeleteIT extends ParallelStatsDisabledIT {
 
 private void testDeleteByFilterAndRow(boolean autoCommit) throws 
SQLException {
 Properties props = new Properties();
-props.setProperty(QueryServices.ENABLE_SERVER_SIDE_MUTATIONS, 
allowServerSideMutations);
+props.setProperty(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS,
+allowServerSideMutations);
 Connection conn = DriverManager.getConnection(getUrl(), props);
 String tableName = initTableValues(conn);
 
@@ -186,7 +188,8 @@ public class DeleteIT extends ParallelStatsDisabledIT {
 
 private void testDeleteRange(boolean autoCommit, boolean createIndex, 
boolean local) throws Exception {
 Properties props = new Properties();
-props.setProperty(QueryServices.ENABLE_SERVER_SIDE_MUTATIONS, 
allowServerSideMutations);
+props.setProperty(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS,
+allowServerSideMutations);
 Connection conn = DriverManager.getConnection(getUrl(), props);
 String tableName = initTableValues(conn);
 String indexName = generateUniqueName();
@@ -319,7 +322,8 @@ public class DeleteIT extends ParallelStatsDisabledIT {
 Connection con = null;
 try {
 Properties props = new Properties();
-props.setProperty(QueryServices.ENABLE_SERVER_SIDE_MUTATIONS, 
allowServerSideMutations);
+
props.setProperty(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS,
+allowServerSideMutations);
 con = DriverManager.getConnection(getUrl(), props);
 con.setAutoCommit(autoCommit);
 
@@ -413,7 +417,8 @@ public class DeleteIT extends ParallelStatsDisabledIT {
 try {
 boolean autoCommit = false;
 Properties props = new Properties();
-props.setProperty(QueryServices.ENABLE_SERVER_SIDE_MUTATIONS, 
allowServerSideMutations);
+
props.setProperty(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS,
+allowServerSideMutations);
 con = DriverManager.getConnection(getUrl(), props);
 con.setAutoCommit(autoCommit);
 
@@ -490,7 +495,8 @@ public class DeleteIT extends ParallelStatsDisabledIT {
 try {
 boolean autoCommit = false;
 Properties props = new Properties();
-props.setProperty(QueryServices.ENABLE_SERVER_SIDE_MUTATIONS, 
allowServerSideMutations);
+
props.setProperty(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS,
+allowServerSideMutations);
 con = DriverManager.getConnection(getUrl(), props);
 con.setAutoCommit(autoCommit);
 
@@ -615,7 +621,8 @@ public class DeleteIT extends ParallelStatsDisabledIT {
 Connection con = null;
 try {
 Properties props = new Properties();
-props.setProperty(QueryServices.ENABLE_SERVER_SIDE_MUTATIONS, 
allowServerSideMutations);
+
props.setProperty(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS,
+allowServerSideMutations);
 con
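
The email is truncated above. After this change the delete and upsert paths
are toggled independently; a configuration sketch (the upsert-side constant
follows the same naming pattern but is not visible in this excerpt):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.util.Properties;
    import org.apache.phoenix.query.QueryServices;

    class ClientSideMutationsOnly {
        static Connection open(String url) throws SQLException {
            Properties props = new Properties();
            props.setProperty(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS, "false");
            props.setProperty(QueryServices.ENABLE_SERVER_SIDE_UPSERT_MUTATIONS, "false");
            return DriverManager.getConnection(url, props);
        }
    }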

[phoenix] 32/34: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 641939423d6547e4e9c90b1728d23960024a2b11
Author: Karan Mehta 
AuthorDate: Wed Apr 3 19:34:26 2019 +0100

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../phoenix/iterate/ScanningResultIterator.java| 85 --
 1 file changed, 46 insertions(+), 39 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 893eaa2..9a656ee 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -17,6 +17,17 @@
  */
 package org.apache.phoenix.iterate;
 
+import static 
org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_REMOTE_RESULTS_METRIC_NAME;
+import static 
org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_RESULTS_METRIC_NAME;
+import static 
org.apache.hadoop.hbase.client.metrics.ScanMetrics.MILLIS_BETWEEN_NEXTS_METRIC_NAME;
+import static 
org.apache.hadoop.hbase.client.metrics.ScanMetrics.NOT_SERVING_REGION_EXCEPTION_METRIC_NAME;
+import static 
org.apache.hadoop.hbase.client.metrics.ScanMetrics.REGIONS_SCANNED_METRIC_NAME;
+import static 
org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_CALLS_METRIC_NAME;
+import static 
org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_RETRIES_METRIC_NAME;
+import static 
org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_CALLS_METRIC_NAME;
+import static 
org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_RETRIES_METRIC_NAME;
+import static 
org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME;
+import static 
org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
 import static 
org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -40,6 +51,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.phoenix.monitoring.CombinableMetric;
+import org.apache.phoenix.monitoring.GlobalClientMetrics;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
@@ -47,29 +59,12 @@ import org.apache.phoenix.util.ServerUtil;
 
 public class ScanningResultIterator implements ResultIterator {
 private final ResultScanner scanner;
-private final Scan scan;
 private final ScanMetricsHolder scanMetricsHolder;
 boolean scanMetricsUpdated;
 boolean scanMetricsEnabled;
 
-// These metric names are how HBase refers them
-// Since HBase stores these strings as static final, we are using the same 
here
-static final String RPC_CALLS_METRIC_NAME = "RPC_CALLS";
-static final String REMOTE_RPC_CALLS_METRIC_NAME = "REMOTE_RPC_CALLS";
-static final String MILLIS_BETWEEN_NEXTS_METRIC_NAME = 
"MILLIS_BETWEEN_NEXTS";
-static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = 
"NOT_SERVING_REGION_EXCEPTION";
-static final String BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS";
-static final String BYTES_IN_REMOTE_RESULTS_METRIC_NAME = 
"BYTES_IN_REMOTE_RESULTS";
-static final String REGIONS_SCANNED_METRIC_NAME = "REGIONS_SCANNED";
-static final String RPC_RETRIES_METRIC_NAME = "RPC_RETRIES";
-static final String REMOTE_RPC_RETRIES_METRIC_NAME = "REMOTE_RPC_RETRIES";
-static final String COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME = "ROWS_SCANNED";
-static final String COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME = 
"ROWS_FILTERED";
-static final String GLOBAL_BYTES_IN_RESULTS_METRIC_NAME = 
"BYTES_IN_RESULTS";
-
 public ScanningResultIterator(ResultScanner scanner, Scan scan, 
ScanMetricsHolder scanMetricsHolder) {
 this.scanner = scanner;
-this.scan = scan;
 this.scanMetricsHolder = scanMetricsHolder;
 scanMetricsUpdated = false;
 scanMetricsEnabled = scan.isScanMetricsEnabled();
@@ -81,24 +76,25 @@ public class ScanningResultIterator implements 
ResultIterator {
 scanner.close();
 }
 
-private static void changeMetric(CombinableMetric metric, Long value) {
+private void changeMetric(CombinableMetric metric, Long value) {
 if(value != null) 
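
The email is truncated above; a plausible completion of the method, which is
the heart of the NPE fix -- every value pulled from the scan-metrics map may be
null and is guarded before the update:

    private void changeMetric(CombinableMetric metric, Long value) {
        if (value != null) {
            metric.change(value);
        }
    }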

[phoenix] 34/34: PHOENIX-4956 Distribution of Apache Phoenix 5.1 for CDH 6.1 (addendum)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 1d8240ffeccedfd0b9e4e12b8828eeef09b96513
Author: Pedro Boado 
AuthorDate: Sun Apr 7 18:37:25 2019 +0100

PHOENIX-4956 Distribution of Apache Phoenix 5.1 for CDH 6.1 (addendum)
---
 phoenix-parcel/pom.xml |  12 --
 .../src/build/components/all-common-jars.xml   | 150 -
 2 files changed, 162 deletions(-)

diff --git a/phoenix-parcel/pom.xml b/phoenix-parcel/pom.xml
index 09d0037..332cf40 100644
--- a/phoenix-parcel/pom.xml
+++ b/phoenix-parcel/pom.xml
@@ -140,17 +140,5 @@
       <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-core</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-flume</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-pig</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-spark</artifactId>
-    </dependency>
   </dependencies>
 </project>
diff --git a/phoenix-parcel/src/build/components/all-common-jars.xml 
b/phoenix-parcel/src/build/components/all-common-jars.xml
index c659ab8..80d2716 100644
--- a/phoenix-parcel/src/build/components/all-common-jars.xml
+++ b/phoenix-parcel/src/build/components/all-common-jars.xml
@@ -36,102 +36,7 @@
   
   0644
 
-
-  ${project.basedir}/../phoenix-server/target
-  ${parcel.folder}/lib/phoenix/
-  
-phoenix-*-server.jar
-  
-  
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-  
-  0644
-
-
-  ${project.basedir}/../phoenix-queryserver/target/
-  ${parcel.folder}/lib/phoenix/
-  
-phoenix-*-queryserver.jar
-  
-  
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-  
-  0644
-
-
-  
${project.basedir}/../phoenix-queryserver-client/target/
-  ${parcel.folder}/lib/phoenix/
-  
-phoenix-*-thin-client.jar
-  
-  
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-  
-  0644
-
-
-  ${project.basedir}/../phoenix-hive/target/
-  ${parcel.folder}/lib/phoenix/
-  
-phoenix-*-hive.jar
-  
-  
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-  
-  0644
-
 
-
-
-  
${project.basedir}/../phoenix-hadoop-compat/target/
-  ${parcel.folder}/lib/phoenix/lib
-  
-phoenix-*.jar
-  
-  
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-  
-  0644
-
-
-  ${project.basedir}/../phoenix-pig/target/
-  ${parcel.folder}/lib/phoenix/lib
-  
-phoenix-pig-*.jar
-  
-  
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-  
-  0644
-
-
-  ${project.basedir}/../phoenix-flume/target/
-  ${parcel.folder}/lib/phoenix/lib
-  
-phoenix-*.jar
-  
-  
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-  
-  0644
-
 
   ${project.basedir}/../phoenix-core/target/
   ${parcel.folder}/lib/phoenix/lib
@@ -146,61 +51,6 @@
   0644
 
 
-  ${project.basedir}/../phoenix-spark/target/
-  ${parcel.folder}/lib/phoenix/lib
-  
-  phoenix-*.jar
-  
-  
-*-javadoc.jar
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-  
-  0644
-
-
-  ${project.basedir}/../phoenix-queryserver/target/
-  ${parcel.folder}/lib/phoenix/lib
-  
-phoenix-queryserver-*.jar
-  
-  
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-  
-  0644
-
-
-  ${project.basedir}/../phoenix-hive/target/
-  ${parcel.folder}/lib/phoenix/lib
-  
-phoenix-hive-*.jar
-  
-  
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-  
-  0644
-
-
-  
${project.basedir}/../phoenix-queryserver-client/target/
-  ${parcel.folder}/lib/phoenix/lib
-  
-phoenix-*.jar
-  
-  
-*-minimal.jar
-*-sources.jar
-*-tests.jar
-
-phoenix-*-thin-client.jar
-  
-  0644
-
-
   ${project.basedir}/../phoenix-pherf/target/
   ${parcel.folder}/lib/phoenix/lib
   



[phoenix] 25/34: PHOENIX-1614 ALTER TABLE ADD IF NOT EXISTS doesn't work as expected

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 2ddfa02128dd653b0091033e416659033c306052
Author: Toshihiro Suzuki 
AuthorDate: Thu Mar 21 07:46:09 2019 +

PHOENIX-1614 ALTER TABLE ADD IF NOT EXISTS doesn't work as expected

(cherry picked from commit 69e5bb0b304a53967cef40b2a4cfc66e69ecaa51)
---
 .../org/apache/phoenix/end2end/AlterTableIT.java   | 43 +++---
 .../org/apache/phoenix/schema/MetaDataClient.java  | 29 +++
 2 files changed, 35 insertions(+), 37 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 64f0349..a05132e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -330,17 +330,15 @@ public class AlterTableIT extends ParallelStatsDisabledIT 
{
 
 }
 
-
 @Test
 public void testAddVarCols() throws Exception {
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-Connection conn = DriverManager.getConnection(getUrl(), props);
-conn.setAutoCommit(false);
+try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+conn.setAutoCommit(false);
 
-try {
 String ddl = "CREATE TABLE " + dataTableFullName +
-"  (a_string varchar not null, col1 integer" +
-"  CONSTRAINT pk PRIMARY KEY (a_string)) " + 
tableDDLOptions;
+  "  (a_string varchar not null, col1 integer" +
+  "  CONSTRAINT pk PRIMARY KEY (a_string)) " + tableDDLOptions;
 conn.createStatement().execute(ddl);
 
 String dml = "UPSERT INTO " + dataTableFullName + " VALUES(?)";
@@ -359,16 +357,18 @@ public class AlterTableIT extends ParallelStatsDisabledIT 
{
 assertEquals("b",rs.getString(1));
 assertFalse(rs.next());
 
-
 query = "SELECT * FROM " + dataTableFullName + " WHERE a_string = 
'a' ";
 rs = conn.createStatement().executeQuery(query);
 assertTrue(rs.next());
 assertEquals("a",rs.getString(1));
 
-ddl = "ALTER TABLE " + dataTableFullName + " ADD  c1.col2 VARCHAR  
, c1.col3 integer , c2.col4 integer";
+ddl = "ALTER TABLE " + dataTableFullName + " ADD c1.col2 VARCHAR, 
c1.col3 integer, "
+  + "c2.col4 integer";
 conn.createStatement().execute(ddl);
 
-ddl = "ALTER TABLE " + dataTableFullName + " ADD   col5 integer , 
c1.col2 VARCHAR";
+// If we are adding two columns but one of them already exists, 
the other one should
+// not be added
+ddl = "ALTER TABLE " + dataTableFullName + " ADD col5 integer, 
c1.col2 VARCHAR";
 try {
 conn.createStatement().execute(ddl);
 fail();
@@ -384,10 +384,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 assertTrue(e.getMessage(), e.getMessage().contains("ERROR 504 
(42703): Undefined column."));
 }
 
-ddl = "ALTER TABLE " + dataTableFullName + " ADD IF NOT EXISTS 
col5 integer , c1.col2 VARCHAR";
-conn.createStatement().execute(ddl);
-
-dml = "UPSERT INTO " + dataTableFullName + " VALUES(?,?,?,?,?)";
+dml = "UPSERT INTO " + dataTableFullName + " VALUES(?, ?, ?, ?, 
?)";
 stmt = conn.prepareStatement(dml);
 stmt.setString(1, "c");
 stmt.setInt(2, 100);
@@ -407,9 +404,6 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 assertEquals(102,rs.getInt(5));
 assertFalse(rs.next());
 
-ddl = "ALTER TABLE " + dataTableFullName + " ADD  col5 integer";
-conn.createStatement().execute(ddl);
-
 query = "SELECT c1.* FROM " + dataTableFullName + " WHERE a_string 
= 'c' ";
 rs = conn.createStatement().executeQuery(query);
 assertTrue(rs.next());
@@ -417,8 +411,16 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 assertEquals(101,rs.getInt(2));
 assertFalse(rs.next());
 
+// If we are adding two columns with "IF NOT EXISTS" and one of 
them already exists,
+// the other one should be added
+ddl = "ALTER TABLE " + dataTableFullName + " ADD IF NOT EXISTS 
col5 integer, "
+  + "c1.

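The email is truncated above. The semantics this test pins down, in JDBC form
(T, col5 and c1.col2 stand in for the test's generated names):

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    class AlterAddSemantics {
        static void demo(Connection conn) throws SQLException {
            try (Statement stmt = conn.createStatement()) {
                // Without IF NOT EXISTS: if any listed column already exists,
                // the statement fails and *none* of the columns are added.
                // With IF NOT EXISTS: existing columns are skipped and the
                // remaining ones are still added.
                stmt.execute("ALTER TABLE T ADD IF NOT EXISTS col5 INTEGER, c1.col2 VARCHAR");
            }
        }
    }
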
[phoenix] 24/34: PHOENIX-5172 Harden the PQS canary synth test tool with retry mechanism and more logging

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 5171199733190134d039b1797e606d1e6e36dbf7
Author: Swaroopa Kadam 
AuthorDate: Tue Mar 19 20:39:45 2019 +

PHOENIX-5172 Harden the PQS canary synth test tool with retry mechanism and 
more logging
---
 .../org/apache/phoenix/tool/PhoenixCanaryTool.java | 212 ++--
 .../tool/ParameterizedPhoenixCanaryToolIT.java | 280 +
 .../apache/phoenix/tool/PhoenixCanaryToolTest.java |  53 +---
 .../resources/phoenix-canary-file-sink.properties  |  17 ++
 4 files changed, 378 insertions(+), 184 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java 
b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
index 405f54f..865d210 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
@@ -28,18 +28,20 @@ import 
net.sourceforge.argparse4j.inf.ArgumentParserException;
 import net.sourceforge.argparse4j.inf.Namespace;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import java.io.File;
 import java.io.InputStream;
 import java.sql.Connection;
-import java.sql.DatabaseMetaData;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
-import java.sql.Statement;
+import java.sql.SQLException;
+import java.sql.Timestamp;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -49,16 +51,23 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
 /**
- * A Canary Tool to perform synthetic tests for Query Server
+ * A Canary Tool to perform synthetic tests for Phoenix
+ * It assumes that TEST.PQSTEST or the schema.table passed in the argument
+ * is already present as following command
+ * CREATE TABLE IF NOT EXISTS TEST.PQSTEST (mykey INTEGER NOT NULL
+ * PRIMARY KEY, mycolumn VARCHAR, insert_date TIMESTAMP);
+ *
  */
 public class PhoenixCanaryTool extends Configured implements Tool {
 
 private static String TEST_SCHEMA_NAME = "TEST";
 private static String TEST_TABLE_NAME = "PQSTEST";
 private static String FQ_TABLE_NAME = "TEST.PQSTEST";
-private boolean USE_NAMESPACE = true;
-
+private static Timestamp timestamp;
+private static final int MAX_CONNECTION_ATTEMPTS = 5;
+private final int FIRST_TIME_RETRY_TIMEOUT = 5000;
 private Sink sink = new StdOutSink();
+public static final String propFileName = 
"phoenix-canary-file-sink.properties";
 
 /**
  * Base class for a Canary Test
@@ -97,84 +106,38 @@ public class PhoenixCanaryTool extends Configured 
implements Tool {
 }
 }
 
-/**
- * Test which prepares environment before other tests run
- */
-static class PrepareTest extends CanaryTest {
-void onExecute() throws Exception {
-result.setTestName("prepare");
-Statement statement = connection.createStatement();
-DatabaseMetaData dbm = connection.getMetaData();
-ResultSet tables = dbm.getTables(null, TEST_SCHEMA_NAME, 
TEST_TABLE_NAME, null);
-if (tables.next()) {
-// Drop test Table if exists
-statement.executeUpdate("DROP TABLE IF EXISTS " + 
FQ_TABLE_NAME);
-}
-
-// Drop test schema if exists
-if (TEST_SCHEMA_NAME != null) {
-statement = connection.createStatement();
-statement.executeUpdate("DROP SCHEMA IF EXISTS " + 
TEST_SCHEMA_NAME);
-}
-}
-}
-
-/**
- * Create Schema Test
- */
-static class CreateSchemaTest extends CanaryTest {
-void onExecute() throws Exception {
-result.setTestName("createSchema");
-Statement statement = connection.createStatement();
-statement.executeUpdate("CREATE SCHEMA IF NOT EXISTS " + 
TEST_SCHEMA_NAME);
-}
-}
-
-/**
- * Create Table Test
- */
-static class CreateTableTest extends CanaryTest {
-void onExecute() throws Exception {
-result.setTestName("createTable");
-Statement statement = connection.createStatement();
-// Create Table
-statement.executeUpdate("CREATE TABLE IF NOT EXISTS" + 
FQ_TABLE_NAME + " (mykey " + "INTEGER "
-+ "NOT " + "NULL PRIMARY KEY, " + "mycolumn VARCHAR)");
-}
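
The email is truncated above. The retry mechanism named in the subject builds
on HBase's RetryCounter (imported in this diff) and the two new constants; one
plausible shape for the connection step -- the real loop lies beyond the
cut-off and may differ:

    // Illustrative fragment, not the committed code.
    static Connection connectWithRetries(String url, Properties props)
            throws SQLException, InterruptedException {
        RetryCounter retrier = new RetryCounter(MAX_CONNECTION_ATTEMPTS,
                FIRST_TIME_RETRY_TIMEOUT, TimeUnit.MILLISECONDS);
        Connection connection = null;
        while (connection == null) {
            try {
                connection = DriverManager.getConnection(url, props);
            } catch (SQLException e) {
                if (!retrier.shouldRetry()) {
                    throw e;
                }
                retrier.sleepUntilNextRetry();
            }
        }
        return connection;
    }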

[phoenix] 11/34: PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 3c2b20b58b357fc252416ba3390e51172ee993a3
Author: Jacob Isaac 
AuthorDate: Wed Feb 27 22:11:55 2019 +

PHOENIX-5122: PHOENIX-4322 breaks client backward compatibility

Signed-off-by: Chinmay Kulkarni 
---
 .../expression/RowValueConstructorExpression.java  | 53 +++---
 1 file changed, 47 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
index 9bb7234..c06bdc8 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
@@ -28,6 +28,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.BitSet;
 import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -47,13 +48,42 @@ public class RowValueConstructorExpression extends 
BaseCompoundExpression {
 private int partialEvalIndex = -1;
 private int estimatedByteSize;
 
+// The boolean field that indicated whether the object is a literal constant
+// has been repurposed as a bitset and now holds additional information.
+// This is to facilitate b/w compat to 4.13 clients.
+// @see <a href="https://issues.apache.org/jira/browse/PHOENIX-5122">PHOENIX-5122</a>
+private BitSet extraFields;
+
+// Important: when adding new bits, make sure to add them towards the end,
+// else b/w compat will break again.
+private enum ExtraFieldPosition {
+   
+   LITERAL_CONSTANT(0),
+   STRIP_TRAILING_SEPARATOR_BYTE(1);
+   
+   private int bitPosition;
+
+   private ExtraFieldPosition(int position) {
+   bitPosition = position;
+   }
+   
+   private int getBitPosition() {
+   return bitPosition;
+   }
+}
+
 public RowValueConstructorExpression() {
 }
 
 public RowValueConstructorExpression(List children, boolean 
isConstant) {
 super(children);
+extraFields = new BitSet(8);
+   
extraFields.set(ExtraFieldPosition.STRIP_TRAILING_SEPARATOR_BYTE.getBitPosition());
+if (isConstant) {
+   
extraFields.set(ExtraFieldPosition.LITERAL_CONSTANT.getBitPosition());
+}
 estimatedByteSize = 0;
-init(isConstant);
+init();
 }
 
 public RowValueConstructorExpression clone(List children) {
@@ -82,24 +112,34 @@ public class RowValueConstructorExpression extends 
BaseCompoundExpression {
 @Override
 public void readFields(DataInput input) throws IOException {
 super.readFields(input);
-init(input.readBoolean());
+extraFields = BitSet.valueOf(new byte[] {input.readByte()});
+init();
 }
 
 @Override
 public void write(DataOutput output) throws IOException {
 super.write(output);
-output.writeBoolean(literalExprPtr != null);
+byte[] b = extraFields.toByteArray();
+output.writeByte((int)(b.length > 0 ? b[0] & 0xff  : 0));
 }
 
-private void init(boolean isConstant) {
+private void init() {
 this.ptrs = new ImmutableBytesWritable[children.size()];
-if(isConstant) {
+if (isConstant()) {
 ImmutableBytesWritable ptr = new ImmutableBytesWritable();
 this.evaluate(null, ptr);
 literalExprPtr = ptr;
 }
 }
 
+private boolean isConstant() {
+   return 
extraFields.get(ExtraFieldPosition.LITERAL_CONSTANT.getBitPosition());
+}
+
+private boolean isStripTrailingSepByte() {
+   return 
extraFields.get(ExtraFieldPosition.STRIP_TRAILING_SEPARATOR_BYTE.getBitPosition());
+}
+
 @Override
 public PDataType getDataType() {
 return PVarbinary.INSTANCE;
@@ -200,7 +240,8 @@ public class RowValueConstructorExpression extends 
BaseCompoundExpression {
 for (int k = expressionCount -1 ; 
 k >=0 &&  getChildren().get(k).getDataType() != 
null 
   && 
!getChildren().get(k).getDataType().isFixedWidth()
-  && outputBytes[outputSize-1] == 
SchemaUtil.getSeparatorByte(true, false, getChildren().get(k)) ; k--) {
+  && outputBytes[outputSize-1] == 
SchemaUtil.getSeparatorByte(true, false, getChildren().get(k))
+  && isStripTrailingSepByte() ; k--) {
 outputSize--;
 }
 ptr.set(outputBytes, 0, outputSize);
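
To see why the single serialized byte stays compatible, here is a minimal standalone sketch (plain JDK, no Phoenix classes; WireCompatSketch is just an illustrative name). A 4.13 client wrote the flag via DataOutput.writeBoolean(), i.e. one byte, 0x01 or 0x00; readFields() above now feeds that same byte into a BitSet, so bit 0 still carries the literal-constant flag and the new bit 1 comes out clear, preserving the old separator handling for old writers:

    import java.util.BitSet;

    public class WireCompatSketch {
        public static void main(String[] args) {
            // Byte written by a 4.13 client for a literal constant: 0x01.
            // The new reader sees bit 0 set (LITERAL_CONSTANT) and bit 1
            // clear (STRIP_TRAILING_SEPARATOR_BYTE), i.e. old behaviour.
            BitSet fromOldClient = BitSet.valueOf(new byte[] { 0x01 });
            System.out.println(fromOldClient.get(0)); // true  -> LITERAL_CONSTANT
            System.out.println(fromOldClient.get(1)); // false -> old separator handling

            // A new writer sets bit 1 (and bit 0 for constants) and emits the
            // BitSet's first byte, mirroring write() above.
            BitSet extraFields = new BitSet(8);
            extraFields.set(1); // STRIP_TRAILING_SEPARATOR_BYTE
            extraFields.set(0); // LITERAL_CONSTANT
            byte[] b = extraFields.toByteArray();
            System.out.println(b.length > 0 ? b[0] & 0xff : 0); // 3 == 0b11
        }
    }

This is also why the comment in the patch insists that new bits be appended at higher positions: bit 0 must keep the meaning the old boolean had on the wire.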



[phoenix] branch 5.x-cdh6 updated (03465dd -> 16248b3)

2019-04-07 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a change to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


 discard 03465dd  PHOENIX-4956 Distribution of Apache Phoenix 5.1 for CDH 6.1
 new 16248b3  PHOENIX-4956 Distribution of Apache Phoenix 5.1 for CDH 6.1

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (03465dd)
\
 N -- N -- N   refs/heads/5.x-cdh6 (16248b3)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 pom.xml | 4 
 1 file changed, 4 deletions(-)



[phoenix] branch 5.x-cdh6 created (now 03465dd)

2019-02-25 Thread pboado
This is an automated email from the ASF dual-hosted git repository.

pboado pushed a change to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at 03465dd  PHOENIX-4956 Distribution of Apache Phoenix 5.1 for CDH 6.1

This branch includes the following new commits:

 new 03465dd  PHOENIX-4956 Distribution of Apache Phoenix 5.1 for CDH 6.1

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[8/9] phoenix git commit: PHOENIX-4977 Make KillServerOnFailurePolicy a configurable option in PhoenixIndexFailurePolicy

2018-12-03 Thread pboado
PHOENIX-4977 Make KillServerOnFailurePolicy a configurable option in 
PhoenixIndexFailurePolicy


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e1e265a8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e1e265a8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e1e265a8

Branch: refs/heads/4.14-cdh5.13
Commit: e1e265a81ca239bbaa604b3207da2499c7faf5eb
Parents: 578b840
Author: Vincent Poon 
Authored: Wed Oct 24 00:03:22 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:32:51 2018 +

--
 .../phoenix/hbase/index/write/DelegateIndexFailurePolicy.java  | 5 -
 .../org/apache/phoenix/index/PhoenixIndexFailurePolicy.java| 6 ++
 .../src/main/java/org/apache/phoenix/query/QueryServices.java  | 1 +
 3 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1e265a8/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/DelegateIndexFailurePolicy.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/DelegateIndexFailurePolicy.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/DelegateIndexFailurePolicy.java
index a7fb7ec..caf2b38 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/DelegateIndexFailurePolicy.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/DelegateIndexFailurePolicy.java
@@ -28,7 +28,7 @@ import com.google.common.collect.Multimap;
 
 public class DelegateIndexFailurePolicy implements IndexFailurePolicy {
 
-private final IndexFailurePolicy delegate;
+private IndexFailurePolicy delegate;
 
 public DelegateIndexFailurePolicy(IndexFailurePolicy delegate) {
 this.delegate = delegate;
@@ -55,4 +55,7 @@ public class DelegateIndexFailurePolicy implements 
IndexFailurePolicy {
 delegate.stop(arg0);
 }
 
+public void setDelegate(IndexFailurePolicy delegate) {
+this.delegate = delegate;
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1e265a8/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index e7f5ac2..eabf481 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -60,6 +60,7 @@ import 
org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.write.DelegateIndexFailurePolicy;
 import org.apache.phoenix.hbase.index.write.KillServerOnFailurePolicy;
+import org.apache.phoenix.hbase.index.write.LeaveIndexActiveFailurePolicy;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryServices;
@@ -134,6 +135,11 @@ public class PhoenixIndexFailurePolicy extends 
DelegateIndexFailurePolicy {
 } else {
throwIndexWriteFailure = Boolean.parseBoolean(value);
 }
+
+boolean killServer = 
env.getConfiguration().getBoolean(QueryServices.INDEX_FAILURE_KILL_SERVER, 
true);
+if (!killServer) {
+setDelegate(new LeaveIndexActiveFailurePolicy());
+} // else, default in constructor is KillServerOnFailurePolicy
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1e265a8/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 559d165..48b7b7f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -152,6 +152,7 @@ public interface QueryServices extends SQLCloseable {
 public static final String INDEX_FAILURE_BLOCK_WRITE = 
"phoenix.index.failure.block.write";
 public static final String INDEX_FAILURE_DISABLE_INDEX = 
"phoenix.index.failure.disable.index";
 public static final String INDEX_FAILURE_THROW_EXCEPTION_ATTRIB = 
"phoenix.index.failure.throw.exception";
+public static final String INDEX_FAILURE_KILL_SERVER = 
"phoenix.index.failure.unhandled.killserver";
 
 // Index will be partiall
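
With this change an operator can opt out of the kill-server behaviour by setting the new property to false on the region servers (typically via hbase-site.xml). A minimal sketch using the standard Hadoop Configuration API, with the property name taken from the diff above:

    import org.apache.hadoop.conf.Configuration;

    public class IndexFailurePolicyConfigSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Default is true: an unhandled index write failure kills the
            // region server (KillServerOnFailurePolicy). Setting it to false
            // swaps in LeaveIndexActiveFailurePolicy instead, per the patch.
            conf.setBoolean("phoenix.index.failure.unhandled.killserver", false);
            System.out.println(
                conf.getBoolean("phoenix.index.failure.unhandled.killserver", true));
        }
    }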

[2/9] phoenix git commit: PHOENIX-4519 - Index rebuild MR jobs not created for "alter index rebuild async" rebuilds

2018-12-03 Thread pboado
PHOENIX-4519 - Index rebuild MR jobs not created for "alter index rebuild 
async" rebuilds


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/352c4205
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/352c4205
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/352c4205

Branch: refs/heads/4.14-cdh5.13
Commit: 352c42054f5a95d6162854feffeaec3c23b4a074
Parents: 13901c3
Author: Geoffrey 
Authored: Fri Sep 7 00:18:09 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:32:35 2018 +

--
 .../end2end/index/PhoenixMRJobSubmitterIT.java  | 113 +++
 .../index/automation/PhoenixMRJobSubmitter.java |  16 ++-
 .../apache/phoenix/schema/MetaDataClient.java   |   2 +-
 3 files changed, 126 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/352c4205/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PhoenixMRJobSubmitterIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PhoenixMRJobSubmitterIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PhoenixMRJobSubmitterIT.java
new file mode 100644
index 000..7cc3aa0
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PhoenixMRJobSubmitterIT.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.mapreduce.index.automation.PhoenixAsyncIndex;
+import org.apache.phoenix.mapreduce.index.automation.PhoenixMRJobSubmitter;
+import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.RunUntilFailure;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.util.Map;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class PhoenixMRJobSubmitterIT extends BaseUniqueNamesOwnClusterIT {
+
+  private static String REQUEST_INDEX_REBUILD_SQL = "ALTER INDEX %s ON %s 
REBUILD ASYNC";
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+setUpTestDriver(ReadOnlyProps.EMPTY_PROPS);
+
+  }
+
+  @Test
+  public void testGetCandidateJobs() throws Exception {
+String tableName = "TBL_" + generateUniqueName();
+String asyncIndexName = "IDX_" + generateUniqueName();
+String needsRebuildIndexName = "IDX_" + generateUniqueName();
+String tableDDL = "CREATE TABLE " + tableName + TestUtil.TEST_TABLE_SCHEMA;
+String asyncIndexDDL = "CREATE INDEX " + asyncIndexName + " ON " + 
tableName + " (a.varchar_col1) ASYNC";
+String needsRebuildIndexDDL = "CREATE INDEX " + needsRebuildIndexName + " 
ON " + tableName + " (a.char_col1)";
+long rebuildTimestamp = 100L;
+
+createTestTable(getUrl(), tableDDL);
+
+createTestTable(getUrl(), needsRebuildIndexDDL);
+Connection conn = null;
+PreparedStatement stmt = null;
+try {
+  conn = DriverManager.getConnection(getUrl());
+  TestUtil.assertIndexState(conn, needsRebuildIndexName, 
PIndexState.ACTIVE, 0L);
+
+  //first make sure that we don't return an active index
+  PhoenixMRJobSubmitter submitter = new 
PhoenixMRJobSubmitter(getUtility().getConfiguration());
+  Map candidateMap = 
submitter.getCandidateJobs(conn);
+  Assert.assertNotNull(candidateMap);
+  Assert.assertEquals(0, candidateMap.size());
+
+  //create an index with ASYNC that will need building via

[1/9] phoenix git commit: PHOENIX-4935 - IndexTool should use empty catalog instead of null

2018-12-03 Thread pboado
Repository: phoenix
Updated Branches:
  refs/heads/4.14-cdh5.13 8559765e2 -> fc9cdb6cf


PHOENIX-4935 - IndexTool should use empty catalog instead of null


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/13901c39
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/13901c39
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/13901c39

Branch: refs/heads/4.14-cdh5.13
Commit: 13901c395039ee252a36f5609f07c6c40ccae63d
Parents: 8559765
Author: Geoffrey 
Authored: Mon Oct 1 23:04:02 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:32:32 2018 +

--
 .../main/java/org/apache/phoenix/mapreduce/index/IndexTool.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/13901c39/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index ac0be01..15d41ea 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -737,7 +737,7 @@ public class IndexTool extends Configured implements Tool {
 
 ResultSet rs = null;
 try {
-rs = dbMetaData.getIndexInfo(null, schemaName, tableName, false, 
false);
+rs = dbMetaData.getIndexInfo("", schemaName, tableName, false, 
false);
 while (rs.next()) {
 final String indexName = rs.getString(6);
 if (indexTable.equalsIgnoreCase(indexName)) {
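
The null-to-empty-string change matters because of the JDBC DatabaseMetaData contract: an empty catalog string selects only objects stored without a catalog, while null means the catalog is not used to narrow the search at all. A short sketch of the corrected call shape (placeholder URL and names; column 6 is INDEX_NAME, as the code above uses):

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class IndexInfoSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder URL; point at a real Phoenix cluster.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                DatabaseMetaData dbm = conn.getMetaData();
                // catalog = "" : only indexes on tables without a catalog,
                // which, per this fix, is the right match for Phoenix tables;
                // catalog = null would skip catalog filtering entirely.
                try (ResultSet rs = dbm.getIndexInfo("", "MY_SCHEMA", "MY_TABLE", false, false)) {
                    while (rs.next()) {
                        System.out.println(rs.getString(6)); // INDEX_NAME
                    }
                }
            }
        }
    }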



[7/9] phoenix git commit: PHOENIX-4988 Incorrect index rowkey generated when updating only non-indexed columns after a delete

2018-12-03 Thread pboado
PHOENIX-4988 Incorrect index rowkey generated when updating only non-indexed 
columns after a delete


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/578b8402
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/578b8402
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/578b8402

Branch: refs/heads/4.14-cdh5.13
Commit: 578b8402ea4c5f000ec6c6046b656767e92ce96a
Parents: 0952475
Author: Vincent Poon 
Authored: Mon Oct 22 21:20:10 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:32:49 2018 +

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 36 
 .../filter/ApplyAndFilterDeletesFilter.java |  9 +++--
 2 files changed, 43 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/578b8402/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index e968e99..1b9b8df 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -62,6 +62,7 @@ import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexScrutiny;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -910,6 +911,41 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  /**
+   * PHOENIX-4988
+   * Test updating only a non-indexed column after two successive deletes to 
an indexed row
+   */
+  @Test
+  public void testUpdateNonIndexedColumn() throws Exception {
+  String tableName = "TBL_" + generateUniqueName();
+  String indexName = "IDX_" + generateUniqueName();
+  String fullTableName = 
SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
+  String fullIndexName = 
SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
+  try (Connection conn = getConnection()) {
+  conn.setAutoCommit(false);
+  conn.createStatement().execute("CREATE TABLE " + fullTableName + " 
(k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) " + tableDDLOptions);
+  conn.createStatement().execute("CREATE " + (localIndex ? " LOCAL " : 
"") + " INDEX " + indexName + " ON " + fullTableName + " (v2)");
+  conn.createStatement().executeUpdate("UPSERT INTO " + fullTableName 
+ "(k,v1,v2) VALUES ('testKey','v1_1','v2_1')");
+  conn.commit();
+  conn.createStatement().executeUpdate("DELETE FROM " + fullTableName);
+  conn.commit();
+  conn.createStatement().executeUpdate("UPSERT INTO " + fullTableName 
+ "(k,v1,v2) VALUES ('testKey','v1_2','v2_2')");
+  conn.commit();
+  conn.createStatement().executeUpdate("DELETE FROM " + fullTableName);
+  conn.commit();
+  conn.createStatement().executeUpdate("UPSERT INTO " + fullTableName 
+ "(k,v1) VALUES ('testKey','v1_3')");
+  conn.commit();
+  IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
+  // PHOENIX-4980
+  // When there is a flush after a data table update of non-indexed 
columns, the
+  // index gets out of sync on the next write
+  getUtility().getHBaseAdmin().flush(TableName.valueOf(fullTableName));
+  conn.createStatement().executeUpdate("UPSERT INTO " + fullTableName 
+ "(k,v1,v2) VALUES ('testKey','v1_4','v2_3')");
+  conn.commit();
+  IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
+  }
+  }
+
 private void upsertRow(String dml, Connection tenantConn, int i) throws 
SQLException {
 PreparedStatement stmt = tenantConn.prepareStatement(dml);
   stmt.setString(1, "00" + String.valueOf(i));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/578b8402/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/cove

[4/9] phoenix git commit: PHOENIX-4964 ORDER BY should use a LOCAL index even if the query is not fully covered.

2018-12-03 Thread pboado
PHOENIX-4964 ORDER BY should use a LOCAL index even if the query is not fully 
covered.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d6b574c0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d6b574c0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d6b574c0

Branch: refs/heads/4.14-cdh5.13
Commit: d6b574c0b0e99454a93669e3a1716da10163225c
Parents: 0ecc818
Author: Lars Hofhansl 
Authored: Fri Oct 12 06:46:53 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:32:41 2018 +

--
 .../phoenix/end2end/index/LocalIndexIT.java | 59 
 .../apache/phoenix/optimize/QueryOptimizer.java |  9 ++-
 2 files changed, 66 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d6b574c0/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 796d5a2..42cdab3 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -266,6 +266,65 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 }
 indexTable.close();
 }
+
+@Test
+public void testLocalIndexUsedForUncoveredOrderBy() throws Exception {
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+TableName physicalTableName = 
SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
+String indexPhysicalTableName = physicalTableName.getNameAsString();
+
+createBaseTable(tableName, null, "('e','i','o')");
+try (Connection conn1 = getConnection()) {
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('b',1,2,4,'z')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('f',1,2,3,'a')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('j',2,4,2,'a')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('q',3,1,1,'c')");
+conn1.commit();
+conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName 
+ " ON " + tableName + "(v1)");
+
+String query = "SELECT * FROM " + tableName +" ORDER BY V1";
+ResultSet rs = conn1.createStatement().executeQuery("EXPLAIN "+ 
query);
+
+HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
TestUtil.TEST_PROPERTIES).getAdmin();
+int numRegions = admin.getTableRegions(physicalTableName).size();
+
+assertEquals(
+"CLIENT PARALLEL " + numRegions + "-WAY RANGE SCAN OVER "
++ indexPhysicalTableName + " [1]\n"
++ "SERVER FILTER BY FIRST KEY ONLY\n"
++ "CLIENT MERGE SORT",
+QueryUtil.getExplainPlan(rs));
+
+rs = conn1.createStatement().executeQuery(query);
+String v = "";
+while(rs.next()) {
+String next = rs.getString("v1");
+assertTrue(v.compareTo(next) <= 0);
+v = next;
+}
+rs.close();
+
+query = "SELECT * FROM " + tableName +" ORDER BY V1 DESC NULLS 
LAST";
+rs = conn1.createStatement().executeQuery("EXPLAIN "+ query);
+assertEquals(
+"CLIENT PARALLEL " + numRegions + "-WAY REVERSE RANGE SCAN 
OVER "
++ indexPhysicalTableName + " [1]\n"
++ "SERVER FILTER BY FIRST KEY ONLY\n"
++ "CLIENT MERGE SORT",
+QueryUtil.getExplainPlan(rs));
+
+rs = conn1.createStatement().executeQuery(query);
+v = "zz";
+while(rs.next()) {
+String next = rs.getString("v1");
+assertTrue(v.compareTo(next) >= 0);
+v = next;
+}
+rs.close();
+
+}
+}
 
 @Test
 public void testLocalIndexScanJoinColumnsFromDataTable() throws Exception {

http://git-wip-us.apache.org/repos/asf/ph

[9/9] phoenix git commit: PHOENIX-5056 Ignore failing IT

2018-12-03 Thread pboado
PHOENIX-5056 Ignore failing IT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fc9cdb6c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fc9cdb6c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fc9cdb6c

Branch: refs/heads/4.14-cdh5.13
Commit: fc9cdb6cf6825c337665d17f50427e9a0c7eb0cc
Parents: e1e265a
Author: Pedro Boado 
Authored: Mon Dec 3 13:49:42 2018 +
Committer: pboado 
Committed: Mon Dec 3 20:32:54 2018 +

--
 .../org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java | 2 ++
 .../java/org/apache/phoenix/end2end/index/MutableIndexIT.java  | 6 +++---
 2 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc9cdb6c/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
index dc3e5d3..4dfe7b9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
@@ -45,11 +45,13 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Maps;
 
+@Ignore
 @Category(NeedsOwnMiniClusterTest.class)
 public class LocalIndexSplitMergeIT extends BaseTest {
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fc9cdb6c/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 1b9b8df..b4ddb5c 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -111,10 +111,10 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
 public static Collection data() {
 return Arrays.asList(new Object[][] { 
 { false, null, false }, { false, null, true },
-{ false, "TEPHRA", false }, { false, "TEPHRA", true },
+{ false, "TEPHRA", false }, { false, "TEPHRA", true } // ,
 //{ false, "OMID", false }, { false, "OMID", true },
-{ true, null, false }, { true, null, true },
-{ true, "TEPHRA", false }, { true, "TEPHRA", true },
+//{ true, null, false }, { true, null, true },
+//{ true, "TEPHRA", false }, { true, "TEPHRA", true },
 //{ true, "OMID", false }, { true, "OMID", true },
 });
 }



[5/9] phoenix git commit: PHOENIX-4967 Reverse scan along LOCAL index does not always return all data.

2018-12-03 Thread pboado
PHOENIX-4967 Reverse scan along LOCAL index does not always return all data.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b3b72676
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b3b72676
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b3b72676

Branch: refs/heads/4.14-cdh5.13
Commit: b3b72676e3bd4a220c63164d7b1b495eec3569b7
Parents: d6b574c
Author: Lars Hofhansl 
Authored: Sat Oct 13 22:34:44 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:32:43 2018 +

--
 .../phoenix/end2end/index/LocalIndexIT.java | 55 +++-
 .../phoenix/iterate/BaseResultIterators.java|  3 +-
 2 files changed, 56 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3b72676/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 42cdab3..cc3a2a5 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -298,11 +298,15 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 
 rs = conn1.createStatement().executeQuery(query);
 String v = "";
+int i = 0;
 while(rs.next()) {
 String next = rs.getString("v1");
 assertTrue(v.compareTo(next) <= 0);
 v = next;
+i++;
 }
+// see PHOENIX-4967
+assertEquals(4, i);
 rs.close();
 
 query = "SELECT * FROM " + tableName +" ORDER BY V1 DESC NULLS 
LAST";
@@ -316,16 +320,65 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 
 rs = conn1.createStatement().executeQuery(query);
 v = "zz";
+i = 0;
 while(rs.next()) {
 String next = rs.getString("v1");
 assertTrue(v.compareTo(next) >= 0);
 v = next;
+i++;
 }
+// see PHOENIX-4967
+assertEquals(4, i);
 rs.close();
 
 }
 }
-
+
+@Test
+public void testLocalIndexReverseScanShouldReturnAllRows() throws 
Exception {
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+TableName physicalTableName = 
SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
+String indexPhysicalTableName = physicalTableName.getNameAsString();
+
+createBaseTable(tableName, null, "('e','i','o')");
+try (Connection conn1 = getConnection()) {
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('b',1,2,4,'z')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('f',1,2,3,'a')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('j',2,4,2,'b')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('q',3,1,1,'c')");
+conn1.commit();
+conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName 
+ " ON " + tableName + "(v1)");
+
+String query = "SELECT V1 FROM " + tableName +" ORDER BY V1 DESC 
NULLS LAST";
+ResultSet rs = conn1.createStatement().executeQuery("EXPLAIN "+ 
query);
+
+HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
TestUtil.TEST_PROPERTIES).getAdmin();
+int numRegions = admin.getTableRegions(physicalTableName).size();
+
+assertEquals(
+"CLIENT PARALLEL " + numRegions + "-WAY REVERSE RANGE SCAN 
OVER "
++ indexPhysicalTableName + " [1]\n"
++ "SERVER FILTER BY FIRST KEY ONLY\n"
++ "CLIENT MERGE SORT",
+QueryUtil.getExplainPlan(rs));
+
+rs = conn1.createStatement().executeQuery(query);
+String v = "zz";
+int i = 0;
+while(rs.next()) {
+String next = rs.getString("v1");
+assertTrue(v.compareTo(next) >= 0);
+v = next;
+i++;
+}
+   

[3/9] phoenix git commit: PHOENIX-4907 - IndexScrutinyTool should use empty catalog instead of null

2018-12-03 Thread pboado
PHOENIX-4907 - IndexScrutinyTool should use empty catalog instead of null


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0ecc8187
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0ecc8187
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0ecc8187

Branch: refs/heads/4.14-cdh5.13
Commit: 0ecc8187e113d9b5e2aaa731c5591cb423743bcf
Parents: 352c420
Author: Geoffrey 
Authored: Tue Sep 18 00:09:44 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:32:38 2018 +

--
 .../java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0ecc8187/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
index f3ff39e..d9a14bf 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
@@ -499,7 +499,7 @@ public class IndexScrutinyTool extends Configured 
implements Tool {
 
 ResultSet rs = null;
 try {
-rs = dbMetaData.getIndexInfo(null, schemaName, tableName, false, 
false);
+rs = dbMetaData.getIndexInfo("", schemaName, tableName, false, 
false);
 while (rs.next()) {
 final String indexName = rs.getString(6);
 if (indexTable.equalsIgnoreCase(indexName)) {



[6/9] phoenix git commit: PHOENIX-4960 Write to table with global index failed if meta of index changed (split, move, etc)

2018-12-03 Thread pboado
PHOENIX-4960 Write to table with global index failed if meta of index changed 
(split, move, etc)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0952475b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0952475b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0952475b

Branch: refs/heads/4.14-cdh5.13
Commit: 0952475bea7bcaeed58731c0d3b3abba4bcd6746
Parents: b3b7267
Author: Vincent Poon 
Authored: Tue Oct 16 03:11:40 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:32:46 2018 +

--
 .../org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0952475b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index ea72a01..68f8abf 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -3902,6 +3902,10 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 newKVs.remove(disableTimeStampKVIndex);
 newKVs.set(indexStateKVIndex, 
KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES,
 INDEX_STATE_BYTES, timeStamp, 
Bytes.toBytes(newState.getSerializedValue(;
+} else if (disableTimeStampKVIndex == -1) { // clear 
disableTimestamp if client didn't pass it in
+newKVs.add(KeyValueUtil.newKeyValue(key, 
TABLE_FAMILY_BYTES,
+
PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, timeStamp, 
PLong.INSTANCE.toBytes(0)));
+disableTimeStampKVIndex = newKVs.size() - 1;
 }
 } else if (newState == PIndexState.DISABLE) {
 //reset the counter for pending disable when 
transitioning from PENDING_DISABLE to DISABLE



[1/9] phoenix git commit: PHOENIX-4935 - IndexTool should use empty catalog instead of null

2018-12-03 Thread pboado
Repository: phoenix
Updated Branches:
  refs/heads/4.14-cdh5.12 2ed114fb5 -> 24b4dbe10


PHOENIX-4935 - IndexTool should use empty catalog instead of null


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/459518fd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/459518fd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/459518fd

Branch: refs/heads/4.14-cdh5.12
Commit: 459518fd86cf0b0d963e65ced5e20e2d4b6edf95
Parents: 2ed114f
Author: Geoffrey 
Authored: Mon Oct 1 23:04:02 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:26:53 2018 +

--
 .../main/java/org/apache/phoenix/mapreduce/index/IndexTool.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/459518fd/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index ac0be01..15d41ea 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -737,7 +737,7 @@ public class IndexTool extends Configured implements Tool {
 
 ResultSet rs = null;
 try {
-rs = dbMetaData.getIndexInfo(null, schemaName, tableName, false, 
false);
+rs = dbMetaData.getIndexInfo("", schemaName, tableName, false, 
false);
 while (rs.next()) {
 final String indexName = rs.getString(6);
 if (indexTable.equalsIgnoreCase(indexName)) {



[4/9] phoenix git commit: PHOENIX-4964 ORDER BY should use a LOCAL index even if the query is not fully covered.

2018-12-03 Thread pboado
PHOENIX-4964 ORDER BY should use a LOCAL index even if the query is not fully 
covered.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e861abb5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e861abb5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e861abb5

Branch: refs/heads/4.14-cdh5.12
Commit: e861abb5ffa6c0583f93cbca0cfe7ba353a47a1d
Parents: 4736ec3
Author: Lars Hofhansl 
Authored: Fri Oct 12 06:46:53 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:27:09 2018 +

--
 .../phoenix/end2end/index/LocalIndexIT.java | 59 
 .../apache/phoenix/optimize/QueryOptimizer.java |  9 ++-
 2 files changed, 66 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e861abb5/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 796d5a2..42cdab3 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -266,6 +266,65 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 }
 indexTable.close();
 }
+
+@Test
+public void testLocalIndexUsedForUncoveredOrderBy() throws Exception {
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+TableName physicalTableName = 
SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
+String indexPhysicalTableName = physicalTableName.getNameAsString();
+
+createBaseTable(tableName, null, "('e','i','o')");
+try (Connection conn1 = getConnection()) {
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('b',1,2,4,'z')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('f',1,2,3,'a')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('j',2,4,2,'a')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('q',3,1,1,'c')");
+conn1.commit();
+conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName 
+ " ON " + tableName + "(v1)");
+
+String query = "SELECT * FROM " + tableName +" ORDER BY V1";
+ResultSet rs = conn1.createStatement().executeQuery("EXPLAIN "+ 
query);
+
+HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
TestUtil.TEST_PROPERTIES).getAdmin();
+int numRegions = admin.getTableRegions(physicalTableName).size();
+
+assertEquals(
+"CLIENT PARALLEL " + numRegions + "-WAY RANGE SCAN OVER "
++ indexPhysicalTableName + " [1]\n"
++ "SERVER FILTER BY FIRST KEY ONLY\n"
++ "CLIENT MERGE SORT",
+QueryUtil.getExplainPlan(rs));
+
+rs = conn1.createStatement().executeQuery(query);
+String v = "";
+while(rs.next()) {
+String next = rs.getString("v1");
+assertTrue(v.compareTo(next) <= 0);
+v = next;
+}
+rs.close();
+
+query = "SELECT * FROM " + tableName +" ORDER BY V1 DESC NULLS 
LAST";
+rs = conn1.createStatement().executeQuery("EXPLAIN "+ query);
+assertEquals(
+"CLIENT PARALLEL " + numRegions + "-WAY REVERSE RANGE SCAN 
OVER "
++ indexPhysicalTableName + " [1]\n"
++ "SERVER FILTER BY FIRST KEY ONLY\n"
++ "CLIENT MERGE SORT",
+QueryUtil.getExplainPlan(rs));
+
+rs = conn1.createStatement().executeQuery(query);
+v = "zz";
+while(rs.next()) {
+String next = rs.getString("v1");
+assertTrue(v.compareTo(next) >= 0);
+v = next;
+}
+rs.close();
+
+}
+}
 
 @Test
 public void testLocalIndexScanJoinColumnsFromDataTable() throws Exception {

http://git-wip-us.apache.org/repos/asf/ph

[7/9] phoenix git commit: PHOENIX-4988 Incorrect index rowkey generated when updating only non-indexed columns after a delete

2018-12-03 Thread pboado
PHOENIX-4988 Incorrect index rowkey generated when updating only non-indexed 
columns after a delete


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b17bab61
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b17bab61
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b17bab61

Branch: refs/heads/4.14-cdh5.12
Commit: b17bab6136fa58c0f51fe2c42a4203fefc71e530
Parents: 5325a18
Author: Vincent Poon 
Authored: Mon Oct 22 21:20:10 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:27:20 2018 +

--
 .../phoenix/end2end/index/MutableIndexIT.java   | 36 
 .../filter/ApplyAndFilterDeletesFilter.java |  9 +++--
 2 files changed, 43 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b17bab61/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index e968e99..1b9b8df 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -62,6 +62,7 @@ import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexScrutiny;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
@@ -910,6 +911,41 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
   }
   }
 
+  /**
+   * PHOENIX-4988
+   * Test updating only a non-indexed column after two successive deletes to 
an indexed row
+   */
+  @Test
+  public void testUpdateNonIndexedColumn() throws Exception {
+  String tableName = "TBL_" + generateUniqueName();
+  String indexName = "IDX_" + generateUniqueName();
+  String fullTableName = 
SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
+  String fullIndexName = 
SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
+  try (Connection conn = getConnection()) {
+  conn.setAutoCommit(false);
+  conn.createStatement().execute("CREATE TABLE " + fullTableName + " 
(k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) " + tableDDLOptions);
+  conn.createStatement().execute("CREATE " + (localIndex ? " LOCAL " : 
"") + " INDEX " + indexName + " ON " + fullTableName + " (v2)");
+  conn.createStatement().executeUpdate("UPSERT INTO " + fullTableName 
+ "(k,v1,v2) VALUES ('testKey','v1_1','v2_1')");
+  conn.commit();
+  conn.createStatement().executeUpdate("DELETE FROM " + fullTableName);
+  conn.commit();
+  conn.createStatement().executeUpdate("UPSERT INTO " + fullTableName 
+ "(k,v1,v2) VALUES ('testKey','v1_2','v2_2')");
+  conn.commit();
+  conn.createStatement().executeUpdate("DELETE FROM " + fullTableName);
+  conn.commit();
+  conn.createStatement().executeUpdate("UPSERT INTO " + fullTableName 
+ "(k,v1) VALUES ('testKey','v1_3')");
+  conn.commit();
+  IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
+  // PHOENIX-4980
+  // When there is a flush after a data table update of non-indexed 
columns, the
+  // index gets out of sync on the next write
+  getUtility().getHBaseAdmin().flush(TableName.valueOf(fullTableName));
+  conn.createStatement().executeUpdate("UPSERT INTO " + fullTableName 
+ "(k,v1,v2) VALUES ('testKey','v1_4','v2_3')");
+  conn.commit();
+  IndexScrutiny.scrutinizeIndex(conn, fullTableName, fullIndexName);
+  }
+  }
+
 private void upsertRow(String dml, Connection tenantConn, int i) throws 
SQLException {
 PreparedStatement stmt = tenantConn.prepareStatement(dml);
   stmt.setString(1, "00" + String.valueOf(i));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b17bab61/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/filter/ApplyAndFilterDeletesFilter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/cove

[5/9] phoenix git commit: PHOENIX-4967 Reverse scan along LOCAL index does not always return all data.

2018-12-03 Thread pboado
PHOENIX-4967 Reverse scan along LOCAL index does not always return all data.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/623566bc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/623566bc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/623566bc

Branch: refs/heads/4.14-cdh5.12
Commit: 623566bcc16bad62da631e341cef75e74bc09357
Parents: e861abb
Author: Lars Hofhansl 
Authored: Sat Oct 13 22:34:44 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:27:12 2018 +

--
 .../phoenix/end2end/index/LocalIndexIT.java | 55 +++-
 .../phoenix/iterate/BaseResultIterators.java|  3 +-
 2 files changed, 56 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/623566bc/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 42cdab3..cc3a2a5 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -298,11 +298,15 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 
 rs = conn1.createStatement().executeQuery(query);
 String v = "";
+int i = 0;
 while(rs.next()) {
 String next = rs.getString("v1");
 assertTrue(v.compareTo(next) <= 0);
 v = next;
+i++;
 }
+// see PHOENIX-4967
+assertEquals(4, i);
 rs.close();
 
 query = "SELECT * FROM " + tableName +" ORDER BY V1 DESC NULLS 
LAST";
@@ -316,16 +320,65 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 
 rs = conn1.createStatement().executeQuery(query);
 v = "zz";
+i = 0;
 while(rs.next()) {
 String next = rs.getString("v1");
 assertTrue(v.compareTo(next) >= 0);
 v = next;
+i++;
 }
+// see PHOENIX-4967
+assertEquals(4, i);
 rs.close();
 
 }
 }
-
+
+@Test
+public void testLocalIndexReverseScanShouldReturnAllRows() throws 
Exception {
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+TableName physicalTableName = 
SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
+String indexPhysicalTableName = physicalTableName.getNameAsString();
+
+createBaseTable(tableName, null, "('e','i','o')");
+try (Connection conn1 = getConnection()) {
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('b',1,2,4,'z')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('f',1,2,3,'a')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('j',2,4,2,'b')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('q',3,1,1,'c')");
+conn1.commit();
+conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName 
+ " ON " + tableName + "(v1)");
+
+String query = "SELECT V1 FROM " + tableName +" ORDER BY V1 DESC 
NULLS LAST";
+ResultSet rs = conn1.createStatement().executeQuery("EXPLAIN "+ 
query);
+
+HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
TestUtil.TEST_PROPERTIES).getAdmin();
+int numRegions = admin.getTableRegions(physicalTableName).size();
+
+assertEquals(
+"CLIENT PARALLEL " + numRegions + "-WAY REVERSE RANGE SCAN 
OVER "
++ indexPhysicalTableName + " [1]\n"
++ "SERVER FILTER BY FIRST KEY ONLY\n"
++ "CLIENT MERGE SORT",
+QueryUtil.getExplainPlan(rs));
+
+rs = conn1.createStatement().executeQuery(query);
+String v = "zz";
+int i = 0;
+while(rs.next()) {
+String next = rs.getString("v1");
+assertTrue(v.compareTo(next) >= 0);
+v = next;
+i++;
+}
+   

[9/9] phoenix git commit: PHOENIX-5056 Ignore failing IT

2018-12-03 Thread pboado
PHOENIX-5056 Ignore failing IT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/24b4dbe1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/24b4dbe1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/24b4dbe1

Branch: refs/heads/4.14-cdh5.12
Commit: 24b4dbe10aef51b244b80e775b3b0415617a4bf4
Parents: 94203e1
Author: Pedro Boado 
Authored: Mon Dec 3 13:49:42 2018 +
Committer: pboado 
Committed: Mon Dec 3 20:27:25 2018 +

--
 .../org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java | 2 ++
 .../java/org/apache/phoenix/end2end/index/MutableIndexIT.java  | 6 +++---
 2 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/24b4dbe1/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
index dc3e5d3..4dfe7b9 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/LocalIndexSplitMergeIT.java
@@ -45,11 +45,13 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Maps;
 
+@Ignore
 @Category(NeedsOwnMiniClusterTest.class)
 public class LocalIndexSplitMergeIT extends BaseTest {
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/24b4dbe1/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 1b9b8df..b4ddb5c 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -111,10 +111,10 @@ public class MutableIndexIT extends 
ParallelStatsDisabledIT {
 public static Collection data() {
 return Arrays.asList(new Object[][] { 
 { false, null, false }, { false, null, true },
-{ false, "TEPHRA", false }, { false, "TEPHRA", true },
+{ false, "TEPHRA", false }, { false, "TEPHRA", true } // ,
 //{ false, "OMID", false }, { false, "OMID", true },
-{ true, null, false }, { true, null, true },
-{ true, "TEPHRA", false }, { true, "TEPHRA", true },
+//{ true, null, false }, { true, null, true },
+//{ true, "TEPHRA", false }, { true, "TEPHRA", true },
 //{ true, "OMID", false }, { true, "OMID", true },
 });
 }



[3/9] phoenix git commit: PHOENIX-4907 - IndexScrutinyTool should use empty catalog instead of null

2018-12-03 Thread pboado
PHOENIX-4907 - IndexScrutinyTool should use empty catalog instead of null


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4736ec37
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4736ec37
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4736ec37

Branch: refs/heads/4.14-cdh5.12
Commit: 4736ec37104abc07093ddc7e44ee111f99f65798
Parents: 5b65a5f
Author: Geoffrey 
Authored: Tue Sep 18 00:09:44 2018 +0100
Committer: pboado 
Committed: Mon Dec 3 20:27:04 2018 +

--
 .../java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4736ec37/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
index f3ff39e..d9a14bf 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
@@ -499,7 +499,7 @@ public class IndexScrutinyTool extends Configured 
implements Tool {
 
 ResultSet rs = null;
 try {
-rs = dbMetaData.getIndexInfo(null, schemaName, tableName, false, 
false);
+rs = dbMetaData.getIndexInfo("", schemaName, tableName, false, 
false);
 while (rs.next()) {
 final String indexName = rs.getString(6);
 if (indexTable.equalsIgnoreCase(indexName)) {


