Build failed in Jenkins: Phoenix-omid2 #131

2018-10-17 Thread Apache Jenkins Server
See 

--
[...truncated 331.86 KB...]
[INFO] --- maven-failsafe-plugin:2.20:verify (ParallelStatsEnabledTest) @ 
phoenix-pig ---
[INFO] 
[INFO] --- maven-failsafe-plugin:2.20:verify (ParallelStatsDisabledTest) @ 
phoenix-pig ---
[INFO] 
[INFO] --- maven-failsafe-plugin:2.20:verify (HBaseManagedTimeTests) @ 
phoenix-pig ---
[INFO] 
[INFO] --- maven-failsafe-plugin:2.20:verify (NeedTheirOwnClusterTests) @ 
phoenix-pig ---
[INFO] 
[INFO] --- maven-failsafe-plugin:2.20:verify (SplitSystemCatalogTests) @ 
phoenix-pig ---
[INFO] 
[INFO] --- maven-install-plugin:2.5.2:install (default-install) @ phoenix-pig 
---
[INFO] Installing 

 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-pig/4.14.0-HBase-1.3/phoenix-pig-4.14.0-HBase-1.3.jar
[INFO] Installing 
 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-pig/4.14.0-HBase-1.3/phoenix-pig-4.14.0-HBase-1.3.pom
[INFO] Installing 

 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-pig/4.14.0-HBase-1.3/phoenix-pig-4.14.0-HBase-1.3-sources.jar
[INFO] Installing 

 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-pig/4.14.0-HBase-1.3/phoenix-pig-4.14.0-HBase-1.3-tests.jar
[INFO] 
[INFO] ------------------< org.apache.phoenix:phoenix-queryserver-client >------------------
[INFO] Building Phoenix Query Server Client 4.14.0-HBase-1.3            [6/15]
[INFO] --------------------------------[ jar ]---------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ 
phoenix-queryserver-client ---
[INFO] Deleting 

[INFO] 
[INFO] --- maven-checkstyle-plugin:2.13:check (validate) @ 
phoenix-queryserver-client ---
[INFO] 
[INFO] --- maven-remote-resources-plugin:1.5:process (default) @ 
phoenix-queryserver-client ---
[INFO] 
[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ 
phoenix-queryserver-client ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 2 resources
[INFO] Copying 3 resources
[INFO] 
[INFO] --- maven-compiler-plugin:3.0:compile (default-compile) @ 
phoenix-queryserver-client ---
[INFO] Changes detected - recompiling the module!
[INFO] Compiling 3 source files to 

[INFO] 
[INFO] --- maven-resources-plugin:2.6:testResources (default-testResources) @ 
phoenix-queryserver-client ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory 

[INFO] Copying 3 resources
[INFO] 
[INFO] --- maven-compiler-plugin:3.0:testCompile (default-testCompile) @ 
phoenix-queryserver-client ---
[INFO] No sources to compile
[INFO] 
[INFO] --- maven-surefire-plugin:2.20:test (default-test) @ 
phoenix-queryserver-client ---
[INFO] 
[INFO] --- maven-source-plugin:2.2.1:jar-no-fork (attach-sources) @ 
phoenix-queryserver-client ---
[INFO] Building jar: 

[INFO] 
[INFO] --- maven-jar-plugin:2.4:test-jar (default) @ phoenix-queryserver-client 
---
[INFO] Building jar: 

[INFO] 
[INFO] --- maven-jar-plugin:2.4:jar (default-jar) @ phoenix-queryserver-client 
---
[INFO] Building jar: 

[INFO] 
[INFO] --- maven-site-plugin:3.2:attach-descriptor (attach-descriptor) @ 
phoenix-queryserver-client ---
[INFO] 
[INFO] --- maven-shade-plugin:2.4.3:shade (thin-client) @ 
phoenix-queryserver-client ---
[INFO] Including org.apache.calcite.avatica:avatica-core:jar:1.12.0 in the 
shaded jar.
[INFO] Including org.apache.calcite.avatica:avatica-metrics:jar:1.12.0 in the 
shaded jar.
[INFO] Including com.fasterxml.jackson.core:jackson-core:jar:2.9.4 in the 
shaded jar.
[INFO] Including com.fasterxml.jackson.core:jackson-annotations:jar:2.9.4 in 
the shaded jar.
[INFO] Including com.fasterxml.jackson.core:jackson-databind:jar:2.9.4 in the 
shaded jar.
[INFO] Including com.google.protobuf:protobuf-java:jar:3.1.0 in the shaded jar.
[INFO] Including 

[48/51] [abbrv] phoenix git commit: PHOENIX-4859 Using local index in where statement for join (only rhs table) query fails (Rajeshbabu)

2018-10-17 Thread pboado
PHOENIX-4859 Using local index in where statement for join (only rhs table) 
query fails (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/87026452
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/87026452
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/87026452

Branch: refs/heads/4.x-cdh5.15
Commit: 87026452ce92866583bd4fd6999d2c8e37ebd39f
Parents: 00ba63b
Author: Rajeshbabu Chintaguntla 
Authored: Tue Oct 9 11:30:32 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../phoenix/end2end/index/LocalIndexIT.java | 29 
 .../phoenix/compile/ExpressionCompiler.java |  2 +-
 .../apache/phoenix/compile/JoinCompiler.java|  2 +-
 .../phoenix/compile/ProjectionCompiler.java |  4 +--
 .../compile/TupleProjectionCompiler.java|  2 +-
 .../phoenix/schema/LocalIndexDataColumnRef.java | 18 ++--
 6 files changed, 44 insertions(+), 13 deletions(-)
--
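
For context, a minimal JDBC sketch of the query shape this patch fixes: a self-join where the predicate sits only on the right-hand table and is served by a local index. This is an illustration, not the committed test; the connection URL, table name, and index name are placeholders.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class LocalIndexRhsJoinRepro {
        public static void main(String[] args) throws Exception {
            // Hypothetical quorum; substitute your own Phoenix JDBC URL.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
                conn.createStatement().execute(
                    "CREATE TABLE T (customer_id INTEGER PRIMARY KEY,"
                    + " postal_code VARCHAR, country_code VARCHAR)");
                conn.createStatement().execute("UPSERT INTO T VALUES (1, '560103', 'IN')");
                conn.commit();
                conn.createStatement().execute("CREATE LOCAL INDEX IDX_T ON T (postal_code)");
                // Before the fix, this query failed when only the rhs table used the local index.
                ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT * FROM T c1, T c2"
                    + " WHERE c1.customer_id = c2.customer_id AND c2.postal_code = '560103'");
                while (rs.next()) {
                    System.out.println(rs.getInt(1));
                }
            }
        }
    }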


http://git-wip-us.apache.org/repos/asf/phoenix/blob/87026452/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index ed1cf45..e260969 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -684,6 +684,35 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 conn1.close();
 }
 
+@Test
+public void testLocalIndexSelfJoin() throws Exception {
+  String tableName = generateUniqueName();
+  String indexName = "IDX_" + generateUniqueName();
+  Connection conn1 = DriverManager.getConnection(getUrl());
+  if (isNamespaceMapped) {
+  conn1.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
+  }
+String ddl =
+"CREATE TABLE "
++ tableName
++ " (customer_id integer primary key, postal_code 
varchar, country_code varchar)";
+conn1.createStatement().execute(ddl);
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values(1,'560103','IN')");
+conn1.commit();
+conn1.createStatement().execute(
+"CREATE LOCAL INDEX " + indexName + " ON " + tableName + 
"(postal_code)");
+ResultSet rs =
+conn1.createStatement()
+.executeQuery(
+"SELECT * from "
++ tableName
++ " c1, "
++ tableName
++ " c2 where c1.customer_id=c2.customer_id 
and c2.postal_code='560103'");
+assertTrue(rs.next());
+conn1.close();
+}
+
 private void copyLocalIndexHFiles(Configuration conf, HRegionInfo 
fromRegion, HRegionInfo toRegion, boolean move)
 throws IOException {
 Path root = FSUtils.getRootDir(conf);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/87026452/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 9daa744..077e1af 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -376,7 +376,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor

http://git-wip-us.apache.org/repos/asf/phoenix/blob/87026452/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 36bfc5f..880fa72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -869,7 +869,7 @@ public class JoinCompiler {
 if (columnRef.getTableRef().equals(tableRef)
                && (!retainPKColumns || !SchemaUtil.isPKColumn(columnRef.getColumn()))) {
 if (columnRef instanceof LocalIndexColumnRef) {
-sourceColumns.add(new 

[40/51] [abbrv] phoenix git commit: PHOENIX-4966 Implement unhandledFilters in PhoenixRelation so that Spark only evaluates filters when required

2018-10-17 Thread pboado
PHOENIX-4966 Implement unhandledFilters in PhoenixRelation so that Spark only 
evaluates filters when required


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a694638f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a694638f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a694638f

Branch: refs/heads/4.x-cdh5.15
Commit: a694638fa8b7a4c7bd1a0b3b2b8874830f7760e8
Parents: fb1e8f7
Author: Thomas D'Silva 
Authored: Thu Oct 11 23:46:48 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../org/apache/phoenix/spark/PhoenixSparkIT.scala   | 14 +++---
 .../org/apache/phoenix/spark/PhoenixRelation.scala  | 16 
 2 files changed, 19 insertions(+), 11 deletions(-)
--
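
Spark's BaseRelation.unhandledFilters contract works like this: the relation reports back the filters it could not push down, and Spark re-evaluates only those rather than re-applying every predicate. A sketch of how the effect can be observed from an application using Spark's Java API; the table name TABLE1, the zkUrl, and the predicate are illustrative assumptions:

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SparkSession;

    public class PushdownCheck {
        public static void main(String[] args) {
            SparkSession spark = SparkSession.builder()
                    .appName("phoenix-pushdown-check").master("local[*]").getOrCreate();
            Dataset<Row> df = spark.read()
                    .format("org.apache.phoenix.spark")
                    .option("table", "TABLE1")
                    .option("zkUrl", "localhost:2181")
                    .load()
                    .filter("COL1 = 'test_row_1' AND ID = 1");
            // Handled predicates show up under "PushedFilters"; a residual Filter
            // node appears only for filters the relation reported as unhandled.
            System.out.println(df.queryExecution().sparkPlan().toString());
            spark.stop();
        }
    }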


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a694638f/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
--
diff --git 
a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala 
b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
index b8e44fe..4e11acc 100644
--- a/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
+++ b/phoenix-spark/src/it/scala/org/apache/phoenix/spark/PhoenixSparkIT.scala
@@ -285,13 +285,13 @@ class PhoenixSparkIT extends AbstractPhoenixSparkIT {
 // Make sure we got the right value back
 assert(res.first().getLong(0) == 1L)
 
-/*
-  NOTE: There doesn't appear to be any way of verifying from the Spark 
query planner that
-  filtering is being pushed down and done server-side. However, since 
PhoenixRelation
-  implements PrunedFilteredScan, debugging has shown that both the SELECT 
columns and WHERE
-  predicates are being passed along to us, which we then forward it to 
Phoenix.
-  TODO: investigate further to find a way to verify server-side pushdown
- */
+val plan = res.queryExecution.sparkPlan
+// filters should be pushed into phoenix relation
+assert(plan.toString.contains("PushedFilters: [IsNotNull(COL1), 
IsNotNull(ID), " +
+  "EqualTo(COL1,test_row_1), EqualTo(ID,1)]"))
+// spark should run the filters on the rows returned by Phoenix
+assert(!plan.toString.contains("Filter (((isnotnull(COL1#8) && 
isnotnull(ID#7L)) " +
+  "&& (COL1#8 = test_row_1)) && (ID#7L = 1))"))
   }
 
   test("Can persist a dataframe using 'DataFrame.saveToPhoenix'") {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a694638f/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRelation.scala
--
diff --git 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRelation.scala 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRelation.scala
index d2eac8c..38bf29a 100644
--- 
a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRelation.scala
+++ 
b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRelation.scala
@@ -36,11 +36,12 @@ case class PhoenixRelation(tableName: String, zkUrl: 
String, dateAsTimestamp: Bo
 but this prevents having to load the whole table into Spark first.
   */
   override def buildScan(requiredColumns: Array[String], filters: 
Array[Filter]): RDD[Row] = {
+val(pushedFilters, unhandledFilters) = buildFilter(filters)
 new PhoenixRDD(
   sqlContext.sparkContext,
   tableName,
   requiredColumns,
-  Some(buildFilter(filters)),
+  Some(pushedFilters),
   Some(zkUrl),
   new Configuration(),
   dateAsTimestamp
@@ -62,12 +63,13 @@ case class PhoenixRelation(tableName: String, zkUrl: 
String, dateAsTimestamp: Bo
 
   // Attempt to create Phoenix-accepted WHERE clauses from Spark filters,
   // mostly inspired from Spark SQL JDBCRDD and the couchbase-spark-connector
-  private def buildFilter(filters: Array[Filter]): String = {
+  private def buildFilter(filters: Array[Filter]): (String, Array[Filter]) = {
 if (filters.isEmpty) {
-  return ""
+  return ("" , Array[Filter]())
 }
 
 val filter = new StringBuilder("")
+val unsupportedFilters = Array[Filter]();
 var i = 0
 
 filters.foreach(f => {
@@ -92,12 +94,18 @@ case class PhoenixRelation(tableName: String, zkUrl: 
String, dateAsTimestamp: Bo
 case StringStartsWith(attr, value) => filter.append(s" 
${escapeKey(attr)} LIKE ${compileValue(value + "%")}")
 case StringEndsWith(attr, value) => filter.append(s" 
${escapeKey(attr)} LIKE ${compileValue("%" + value)}")
 case StringContains(attr, value) => filter.append(s" 
${escapeKey(attr)} LIKE ${compileValue("%" + value + "%")}")
+case _ => unsupportedFilters :+ f
   }
 
   i = i + 1
 })
 
-

[33/51] [abbrv] phoenix git commit: PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/phoenixdb/connection.py
--
diff --git a/python/phoenixdb/connection.py b/python/phoenixdb/connection.py
deleted file mode 100644
index 593a242..000
--- a/python/phoenixdb/connection.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import uuid
-import weakref
-from phoenixdb import errors
-from phoenixdb.avatica.client import OPEN_CONNECTION_PROPERTIES
-from phoenixdb.cursor import Cursor
-from phoenixdb.errors import ProgrammingError
-
-__all__ = ['Connection']
-
-logger = logging.getLogger(__name__)
-
-
-class Connection(object):
-"""Database connection.
-
-You should not construct this object manually, use 
:func:`~phoenixdb.connect` instead.
-"""
-
-cursor_factory = None
-"""
-The default cursor factory used by :meth:`cursor` if the parameter is not 
specified.
-"""
-
-def __init__(self, client, cursor_factory=None, **kwargs):
-self._client = client
-self._closed = False
-if cursor_factory is not None:
-self.cursor_factory = cursor_factory
-else:
-self.cursor_factory = Cursor
-self._cursors = []
-# Extract properties to pass to OpenConnectionRequest
-self._connection_args = {}
-# The rest of the kwargs
-self._filtered_args = {}
-for k in kwargs:
-if k in OPEN_CONNECTION_PROPERTIES:
-self._connection_args[k] = kwargs[k]
-else:
-self._filtered_args[k] = kwargs[k]
-self.open()
-self.set_session(**self._filtered_args)
-
-def __del__(self):
-if not self._closed:
-self.close()
-
-def __enter__(self):
-return self
-
-def __exit__(self, exc_type, exc_value, traceback):
-if not self._closed:
-self.close()
-
-def open(self):
-"""Opens the connection."""
-self._id = str(uuid.uuid4())
-self._client.open_connection(self._id, info=self._connection_args)
-
-def close(self):
-"""Closes the connection.
-No further operations are allowed, either on the connection or any
-of its cursors, once the connection is closed.
-
-If the connection is used in a ``with`` statement, this method will
-be automatically called at the end of the ``with`` block.
-"""
-if self._closed:
-raise ProgrammingError('the connection is already closed')
-for cursor_ref in self._cursors:
-cursor = cursor_ref()
-if cursor is not None and not cursor._closed:
-cursor.close()
-self._client.close_connection(self._id)
-self._client.close()
-self._closed = True
-
-@property
-def closed(self):
-"""Read-only attribute specifying if the connection is closed or 
not."""
-return self._closed
-
-def commit(self):
-"""Commits pending database changes.
-
-Currently, this does nothing, because the RPC does not support
-transactions. Only defined for DB API 2.0 compatibility.
-You need to use :attr:`autocommit` mode.
-"""
-# TODO can support be added for this?
-if self._closed:
-raise ProgrammingError('the connection is already closed')
-
-def cursor(self, cursor_factory=None):
-"""Creates a new cursor.
-
-:param cursor_factory:
-This argument can be used to create non-standard cursors.
-The class returned must be a subclass of
-:class:`~phoenixdb.cursor.Cursor` (for example 
:class:`~phoenixdb.cursor.DictCursor`).
-A default factory for the connection can also be specified using 
the
-:attr:`cursor_factory` attribute.
-
-:returns:
-A :class:`~phoenixdb.cursor.Cursor` object.
-"""
-if self._closed:
-raise ProgrammingError('the connection is already closed')
-cursor = (cursor_factory or self.cursor_factory)(self)
-self._cursors.append(weakref.ref(cursor, self._cursors.remove))

[41/51] [abbrv] phoenix git commit: PHOENIX-4964 ORDER BY should use a LOCAL index even if the query is not fully covered.

2018-10-17 Thread pboado
PHOENIX-4964 ORDER BY should use a LOCAL index even if the query is not fully 
covered.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e8fafd33
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e8fafd33
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e8fafd33

Branch: refs/heads/4.x-cdh5.15
Commit: e8fafd3319f2f6e8c6f5326938ede7150e2c040c
Parents: eb13ffd
Author: Lars Hofhansl 
Authored: Fri Oct 12 06:50:31 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../phoenix/end2end/index/LocalIndexIT.java | 59 
 .../apache/phoenix/optimize/QueryOptimizer.java |  9 ++-
 2 files changed, 66 insertions(+), 2 deletions(-)
--
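
The change is easy to verify from JDBC: EXPLAIN on an uncovered ORDER BY query should now show a range scan over the local index followed by a client merge sort, rather than a full scan plus a sort. A hedged sketch; the URL and a table T with a local index on V1 are assumptions:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class ExplainOrderBy {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
                ResultSet rs = conn.createStatement()
                        .executeQuery("EXPLAIN SELECT * FROM T ORDER BY V1");
                StringBuilder plan = new StringBuilder();
                while (rs.next()) {
                    plan.append(rs.getString(1)).append('\n'); // one plan step per row
                }
                // With the fix, expect "... RANGE SCAN OVER <index> ... CLIENT MERGE SORT".
                System.out.println(plan);
            }
        }
    }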


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e8fafd33/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index e260969..5a59c81 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -266,6 +266,65 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 }
 indexTable.close();
 }
+
+@Test
+public void testLocalIndexUsedForUncoveredOrderBy() throws Exception {
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+TableName physicalTableName = 
SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
+String indexPhysicalTableName = physicalTableName.getNameAsString();
+
+createBaseTable(tableName, null, "('e','i','o')");
+try (Connection conn1 = getConnection()) {
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('b',1,2,4,'z')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('f',1,2,3,'a')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('j',2,4,2,'a')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('q',3,1,1,'c')");
+conn1.commit();
+conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName 
+ " ON " + tableName + "(v1)");
+
+String query = "SELECT * FROM " + tableName +" ORDER BY V1";
+ResultSet rs = conn1.createStatement().executeQuery("EXPLAIN "+ 
query);
+
+HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
TestUtil.TEST_PROPERTIES).getAdmin();
+int numRegions = admin.getTableRegions(physicalTableName).size();
+
+assertEquals(
+"CLIENT PARALLEL " + numRegions + "-WAY RANGE SCAN OVER "
++ indexPhysicalTableName + " [1]\n"
++ "SERVER FILTER BY FIRST KEY ONLY\n"
++ "CLIENT MERGE SORT",
+QueryUtil.getExplainPlan(rs));
+
+rs = conn1.createStatement().executeQuery(query);
+String v = "";
+while(rs.next()) {
+String next = rs.getString("v1");
+assertTrue(v.compareTo(next) <= 0);
+v = next;
+}
+rs.close();
+
+query = "SELECT * FROM " + tableName +" ORDER BY V1 DESC NULLS 
LAST";
+rs = conn1.createStatement().executeQuery("EXPLAIN "+ query);
+assertEquals(
+"CLIENT PARALLEL " + numRegions + "-WAY REVERSE RANGE SCAN 
OVER "
++ indexPhysicalTableName + " [1]\n"
++ "SERVER FILTER BY FIRST KEY ONLY\n"
++ "CLIENT MERGE SORT",
+QueryUtil.getExplainPlan(rs));
+
+rs = conn1.createStatement().executeQuery(query);
+v = "zz";
+while(rs.next()) {
+String next = rs.getString("v1");
+assertTrue(v.compareTo(next) >= 0);
+v = next;
+}
+rs.close();
+
+}
+}
 
 @Test
 public void testLocalIndexScanJoinColumnsFromDataTable() throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e8fafd33/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 

[37/51] [abbrv] phoenix git commit: PHOENIX-4942 Move MetaDataEndpointImplTest to integration test

2018-10-17 Thread pboado
PHOENIX-4942 Move MetaDataEndpointImplTest to integration test


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a4453b66
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a4453b66
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a4453b66

Branch: refs/heads/4.x-cdh5.15
Commit: a4453b66dbec9d78a0e44071918fc191083a7776
Parents: 1c38086
Author: Thomas D'Silva 
Authored: Tue Oct 16 06:17:24 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../phoenix/end2end/MetaDataEndpointImplIT.java | 301 +++
 .../coprocessor/MetaDataEndpointImplTest.java   | 299 --
 2 files changed, 301 insertions(+), 299 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a4453b66/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
new file mode 100644
index 000..f14af9e
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MetaDataEndpointImplIT.java
@@ -0,0 +1,301 @@
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.phoenix.coprocessor.TableViewFinderResult;
+import org.apache.phoenix.coprocessor.ViewFinder;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.junit.Test;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class MetaDataEndpointImplIT extends ParallelStatsDisabledIT {
+private final TableName catalogTable = 
TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
+private final TableName linkTable = 
TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME_BYTES);
+
+/*
+  The tree structure is as follows: Where ParentTable is the Base Table
+  and all children are views and child views respectively.
+
+ParentTable
+  / \
+leftChild   rightChild
+  /
+   leftGrandChild
+ */
+
+@Test
+public void testGettingChildrenAndParentViews() throws Exception {
+String baseTable = generateUniqueName();
+String leftChild = generateUniqueName();
+String rightChild = generateUniqueName();
+String leftGrandChild = generateUniqueName();
+Connection conn = DriverManager.getConnection(getUrl());
+String ddlFormat =
+"CREATE TABLE IF NOT EXISTS " + baseTable + "  (" + " PK2 VARCHAR 
NOT NULL, V1 VARCHAR, V2 VARCHAR "
++ " CONSTRAINT NAME_PK PRIMARY KEY (PK2)" + " )";
+conn.createStatement().execute(ddlFormat);
+
+conn.createStatement().execute("CREATE VIEW " + rightChild + " AS 
SELECT * FROM " + baseTable);
+conn.createStatement().execute("CREATE VIEW " + leftChild + " (carrier 
VARCHAR) AS SELECT * FROM " + baseTable);
+conn.createStatement().execute("CREATE VIEW " + leftGrandChild + " 
(dropped_calls BIGINT) AS 

[11/51] [abbrv] phoenix git commit: PHOENIX-4917 Fix ClassCastException when projecting array elements in hash join

2018-10-17 Thread pboado
PHOENIX-4917 Fix ClassCastException when projecting array elements in hash join


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cea1c710
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cea1c710
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cea1c710

Branch: refs/heads/4.x-cdh5.15
Commit: cea1c710d79b7a1d3b9b7da765ff465b50efe4bf
Parents: 8774744
Author: Gerald Sangudi 
Authored: Sun Sep 23 17:01:18 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../coprocessor/HashJoinRegionScanner.java  | 50 
 1 file changed, 42 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cea1c710/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index 96af154..70eaa03 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -50,6 +50,7 @@ import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
 import org.apache.phoenix.schema.tuple.PositionBasedResultTuple;
 import org.apache.phoenix.schema.tuple.ResultTuple;
+import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.TupleUtil;
@@ -207,19 +208,19 @@ public class HashJoinRegionScanner implements 
RegionScanner {
 }
 if (tempTuples[i] == null) {
 Tuple joined = tempSrcBitSet[i] == 
ValueBitSet.EMPTY_VALUE_BITSET ?
-lhs : TupleProjector.mergeProjectedValue(
-(ProjectedValueTuple) lhs, schema, 
tempDestBitSet,
-null, joinInfo.getSchemas()[i], 
tempSrcBitSet[i],
-joinInfo.getFieldPositions()[i], 
useNewValueColumnQualifier);
+lhs : mergeProjectedValue(
+lhs, schema, tempDestBitSet, null,
+joinInfo.getSchemas()[i], 
tempSrcBitSet[i],
+joinInfo.getFieldPositions()[i]);
 offerResult(joined, projected, result);
 continue;
 }
 for (Tuple t : tempTuples[i]) {
 Tuple joined = tempSrcBitSet[i] == 
ValueBitSet.EMPTY_VALUE_BITSET ?
-lhs : TupleProjector.mergeProjectedValue(
-(ProjectedValueTuple) lhs, schema, 
tempDestBitSet,
-t, joinInfo.getSchemas()[i], 
tempSrcBitSet[i],
-joinInfo.getFieldPositions()[i], 
useNewValueColumnQualifier);
+lhs : mergeProjectedValue(
+lhs, schema, tempDestBitSet, t,
+joinInfo.getSchemas()[i], 
tempSrcBitSet[i],
+joinInfo.getFieldPositions()[i]);
 offerResult(joined, projected, result);
 }
 }
@@ -353,4 +354,37 @@ public class HashJoinRegionScanner implements 
RegionScanner {
 MultiKeyValueTuple multi = new MultiKeyValueTuple(cells);
 resultQueue.offer(multi);
 }
+
+// PHOENIX-4917 Merge array element cell through hash join.
+// Merge into first cell, then reattach array cell.
+private Tuple mergeProjectedValue(
+Tuple dest, KeyValueSchema destSchema, ValueBitSet destBitSet, Tuple 
src,
+KeyValueSchema srcSchema, ValueBitSet srcBitSet, int offset)
+throws IOException {
+
+if (dest instanceof ProjectedValueTuple) {
+return TupleProjector.mergeProjectedValue(
+(ProjectedValueTuple) dest, destSchema, destBitSet, src,
+srcSchema, srcBitSet, offset, useNewValueColumnQualifier);
+}
+
+ProjectedValueTuple first = projector.projectResults(
+new SingleKeyValueTuple(dest.getValue(0)));
+ProjectedValueTuple merged = TupleProjector.mergeProjectedValue(
+first, destSchema, 

[16/51] [abbrv] phoenix git commit: PHOENIX-4791 Array elements are nullified with joins

2018-10-17 Thread pboado
PHOENIX-4791 Array elements are nullified with joins


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/dedc04cc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/dedc04cc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/dedc04cc

Branch: refs/heads/4.x-cdh5.15
Commit: dedc04cc3d323dff8c68d21cd91951ed44a7611c
Parents: 1fcf43c
Author: Gerald Sangudi 
Authored: Thu Aug 23 00:59:12 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../ProjectArrayElemAfterHashJoinIT.java| 177 +++
 .../coprocessor/HashJoinRegionScanner.java  |  69 ++--
 .../NonAggregateRegionScannerFactory.java   |   5 +-
 .../phoenix/iterate/RegionScannerFactory.java   |   7 +-
 4 files changed, 243 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/dedc04cc/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProjectArrayElemAfterHashJoinIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProjectArrayElemAfterHashJoinIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProjectArrayElemAfterHashJoinIT.java
new file mode 100644
index 000..170eb69
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ProjectArrayElemAfterHashJoinIT.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.Properties;
+
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.junit.Test;
+
+public class ProjectArrayElemAfterHashJoinIT extends ParallelStatsDisabledIT {
+
+@Test
+public void testSalted() throws Exception {
+
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+
+try {
+String table = createSalted(conn);
+testTable(conn, table);
+} finally {
+conn.close();
+}
+}
+
+@Test
+public void testUnsalted() throws Exception {
+
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+
+try {
+String table = createUnsalted(conn);
+testTable(conn, table);
+} finally {
+conn.close();
+}
+}
+
+private void testTable(Connection conn, String table) throws Exception {
+
+verifyExplain(conn, table, false, false);
+verifyExplain(conn, table, false, true);
+verifyExplain(conn, table, true, false);
+verifyExplain(conn, table, true, true);
+
+verifyResults(conn, table, false, false);
+verifyResults(conn, table, false, true);
+verifyResults(conn, table, true, false);
+verifyResults(conn, table, true, true);
+}
+
+private String createSalted(Connection conn) throws Exception {
+
+String table = "SALTED_" + generateUniqueName();
+String create = "CREATE TABLE " + table + " ("
++ " id INTEGER NOT NULL,"
++ " vals TINYINT[],"
++ " CONSTRAINT pk PRIMARY KEY (id)"
++ ") SALT_BUCKETS = 4";
+
+conn.createStatement().execute(create);
+return table;
+}
+
+private String createUnsalted(Connection conn) throws Exception {
+
+String table = "UNSALTED_" + generateUniqueName();
+String create = "CREATE TABLE " + table + " ("
++ " id INTEGER NOT NULL,"
++ " vals TINYINT[],"
++ " CONSTRAINT 

[42/51] [abbrv] phoenix git commit: PHOENIX-4874 psql doesn't support date/time with values smaller than milliseconds - addendum (Rajeshbabu)

2018-10-17 Thread pboado
PHOENIX-4874 psql doesn't support date/time with values smaller than 
milliseconds - addendum (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/62c67d6e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/62c67d6e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/62c67d6e

Branch: refs/heads/4.x-cdh5.15
Commit: 62c67d6e1788420812cd8e62264b0c8a41c83312
Parents: 50c2a3b
Author: Rajeshbabu Chintaguntla 
Authored: Tue Oct 16 16:53:21 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../org/apache/phoenix/util/AbstractUpsertExecutorTest.java   | 7 ---
 1 file changed, 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/62c67d6e/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java
index 3ea997b..3b3ebff 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/util/AbstractUpsertExecutorTest.java
@@ -25,20 +25,14 @@ import static org.mockito.Mockito.verifyNoMoreInteractions;
 
 import java.io.IOException;
 import java.sql.Connection;
-import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.sql.Timestamp;
 import java.sql.Types;
-import java.time.LocalDateTime;
-import java.time.LocalTime;
-import java.time.ZoneId;
-import java.time.format.DateTimeFormatterBuilder;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Properties;
-import java.util.TimeZone;
 
 import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -48,7 +42,6 @@ import org.apache.phoenix.schema.types.PArrayDataType;
 import org.apache.phoenix.schema.types.PBinary;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PIntegerArray;
-import org.apache.phoenix.schema.types.PTimestamp;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;



[20/51] [abbrv] phoenix git commit: PHOENIX-4949 - IndexTool - updateIndexState called too many times unnecessarily

2018-10-17 Thread pboado
PHOENIX-4949 - IndexTool - updateIndexState called too many times unnecessarily


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/adbd986f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/adbd986f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/adbd986f

Branch: refs/heads/4.x-cdh5.15
Commit: adbd986fe1f59a4dcdf8d14e8c153e96d6dc987a
Parents: cea1c71
Author: Geoffrey 
Authored: Thu Oct 4 19:20:56 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../index/PhoenixIndexImportDirectReducer.java   | 15 ++-
 1 file changed, 2 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/adbd986f/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
index 51b88c1..0786b9b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
@@ -36,22 +36,11 @@ public class PhoenixIndexImportDirectReducer extends
 Reducer {
 
 private static final Logger LOG = 
LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
-private Configuration configuration;
 
-/**
- * Called once at the start of the task.
- */
 @Override
-protected void setup(Context context) throws IOException, 
InterruptedException {
-configuration = context.getConfiguration();
-}
-
-@Override
-protected void reduce(ImmutableBytesWritable arg0, Iterable 
arg1,
-Reducer.Context arg2)
-throws IOException, InterruptedException {
+protected void cleanup(Context context) throws IOException, 
InterruptedException{
 try {
-IndexToolUtil.updateIndexState(configuration, PIndexState.ACTIVE);
+IndexToolUtil.updateIndexState(context.getConfiguration(), 
PIndexState.ACTIVE);
 } catch (SQLException e) {
 LOG.error(" Failed to update the status to Active");
 throw new RuntimeException(e.getMessage());
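
The fix relies on the Hadoop Reducer lifecycle: reduce() runs once per key group, while cleanup() runs exactly once at the end of the task, so a task-level side effect belongs in cleanup(). A generic sketch of the pattern (hypothetical class and types, not Phoenix's reducer):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Reducer;

    public class OneShotSideEffectReducer
            extends Reducer<Text, Text, NullWritable, NullWritable> {

        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Per-key work only; no global state transitions here.
        }

        @Override
        protected void cleanup(Context context)
                throws IOException, InterruptedException {
            // Called once when the task finishes: the right place for a single
            // idempotent external update, such as flipping an index to ACTIVE.
            updateExternalState(context.getConfiguration());
        }

        private void updateExternalState(Configuration conf) {
            // Placeholder for the one-time update.
        }
    }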



[18/51] [abbrv] phoenix git commit: PHOENIX-4855 Continue to write base table column metadata when creating a view in order to support rollback

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a6c1aa45/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
index a267629..361edf2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumnImpl.java
@@ -90,7 +90,8 @@ public class PColumnImpl implements PColumn {
 }
 }
 
-// a derived column has null type
+// a excluded column (a column that was derived from a parent but that has 
been deleted) is
+// denoted by a column that has a null type
 public static PColumnImpl createExcludedColumn(PName familyName, PName 
columnName, Long timestamp) {
 return new PColumnImpl(familyName, columnName, timestamp);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a6c1aa45/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index 1623175..8cbf757 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -549,6 +549,12 @@ public interface PTable extends PMetaDataEntity {
  * @return a list of all columns
  */
 List getColumns();
+
+/**
+ * Get all excluded columns 
+ * @return a list of excluded columns
+ */
+List getExcludedColumns();
 
 /**
  * @return A list of the column families of this table

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a6c1aa45/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 8d57945..9f06e04 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -115,6 +115,8 @@ public class PTableImpl implements PTable {
 // Have MultiMap for String->PColumn (may need family qualifier)
 private List pkColumns;
 private List allColumns;
+// columns that were inherited from a parent table but that were dropped 
in the view
+private List excludedColumns;
 private List families;
 private Map familyByBytes;
 private Map familyByString;
@@ -217,8 +219,8 @@ public class PTableImpl implements PTable {
 
 // For indexes stored in shared physical tables
 public PTableImpl(PName tenantId, PName schemaName, PName tableName, long 
timestamp, List families, 
-List columns, List physicalNames,PDataType 
viewIndexType, Long viewIndexId, boolean multiTenant, boolean isNamespaceMpped, 
ImmutableStorageScheme storageScheme, QualifierEncodingScheme 
qualifierEncodingScheme,
-EncodedCQCounter encodedCQCounter, Boolean 
useStatsForParallelization) throws SQLException {
+List columns, List physicalNames, PDataType 
viewIndexType, Long viewIndexId, boolean multiTenant, boolean isNamespaceMpped, 
ImmutableStorageScheme storageScheme, QualifierEncodingScheme 
qualifierEncodingScheme,
+EncodedCQCounter encodedCQCounter, Boolean 
useStatsForParallelization, Integer bucketNum) throws SQLException {
 this.pkColumns = this.allColumns = Collections.emptyList();
 this.rowKeySchema = RowKeySchema.EMPTY_SCHEMA;
 this.indexes = Collections.emptyList();
@@ -229,10 +231,14 @@ public class PTableImpl implements PTable {
 familyByString.put(family.getName().getString(), family);
 }
 this.families = families;
+if (bucketNum!=null) {
+columns = columns.subList(1, columns.size());
+}
 init(tenantId, this.schemaName, this.tableName, PTableType.INDEX, 
state, timeStamp, sequenceNumber, pkName, bucketNum, columns,
 this.schemaName, parentTableName, indexes, isImmutableRows, 
physicalNames, defaultFamilyName,
 null, disableWAL, multiTenant, storeNulls, viewType, 
viewIndexType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable,
-transactionProvider, updateCacheFrequency, indexDisableTimestamp, 
isNamespaceMpped, null, false, storageScheme, qualifierEncodingScheme, 
encodedCQCounter, useStatsForParallelization);
+transactionProvider, updateCacheFrequency, indexDisableTimestamp, 
isNamespaceMpped, null,
+false, storageScheme, qualifierEncodingScheme, encodedCQCounter, 
useStatsForParallelization, null);
 }
 
 

[51/51] [abbrv] phoenix git commit: PHOENIX-4358 Case Sensitive String match on SqlType in PDataType (Dave Angulo)

2018-10-17 Thread pboado
PHOENIX-4358 Case Sensitive String match on SqlType in PDataType (Dave Angulo)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/fb1e8f74
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/fb1e8f74
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/fb1e8f74

Branch: refs/heads/4.x-cdh5.15
Commit: fb1e8f74fd142b38709b8b08ed7af14d186c1e5f
Parents: e8fafd3
Author: Thomas D'Silva 
Authored: Fri Oct 12 21:46:15 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../main/java/org/apache/phoenix/schema/types/PDataType.java   | 2 +-
 .../java/org/apache/phoenix/schema/types/PDataTypeTest.java| 6 ++
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/fb1e8f74/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
index 1e29d6f..eba6079 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
@@ -1041,7 +1041,7 @@ public abstract class PDataType implements DataType, Comparable

http://git-wip-us.apache.org/repos/asf/phoenix/blob/fb1e8f74/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
index 4b02cea..e868f4e 100644
--- 
a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
@@ -1949,4 +1949,10 @@ public class PDataTypeTest {
 }
 }
 }
+
+@Test
+public void testFromSqlTypeName() {
+assertEquals(PVarchar.INSTANCE, PDataType.fromSqlTypeName("varchar"));
+}
+
 }
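
The underlying pattern is to normalize the SQL type name before matching, so "varchar" and "VARCHAR" resolve to the same type. A self-contained sketch of the idea (illustrative enum, not Phoenix's PDataType):

    import java.util.Locale;

    public class TypeLookup {
        enum SqlType { VARCHAR, INTEGER, TIMESTAMP }

        static SqlType fromSqlTypeName(String name) {
            // Uppercase first so the lookup is case-insensitive.
            return SqlType.valueOf(name.toUpperCase(Locale.ROOT));
        }

        public static void main(String[] args) {
            System.out.println(fromSqlTypeName("varchar")); // VARCHAR
        }
    }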



[21/51] [abbrv] phoenix git commit: PHOENIX-4666 Persistent subquery cache for hash joins

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/cb697933/phoenix-protocol/src/main/build-proto.sh
--
diff --git a/phoenix-protocol/src/main/build-proto.sh 
b/phoenix-protocol/src/main/build-proto.sh
index b80bf1d..555651b 100755
--- a/phoenix-protocol/src/main/build-proto.sh
+++ b/phoenix-protocol/src/main/build-proto.sh
@@ -27,6 +27,12 @@ if [ $? != 0 ] ; then
   exit 1
 fi
 
+if [[ `protoc --version` != *"2.5.0"* ]]; then
+echo "Must use protoc version 2.5.0"
+exit 1
+fi
+
+
 PROTO_ROOT_DIR=`dirname $0`
 PROTO_DIR=$PROTO_ROOT_DIR
 JAVA_DIR=$PROTO_ROOT_DIR/../../../phoenix-core/src/main/java



[34/51] [abbrv] phoenix git commit: PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/phoenixdb/avatica/proto/requests_pb2.py
--
diff --git a/python/phoenixdb/avatica/proto/requests_pb2.py 
b/python/phoenixdb/avatica/proto/requests_pb2.py
deleted file mode 100644
index 203f945..000
--- a/python/phoenixdb/avatica/proto/requests_pb2.py
+++ /dev/null
@@ -1,1206 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: requests.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from . import common_pb2 as common__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='requests.proto',
-  package='',
-  syntax='proto3',
-  
serialized_pb=_b('\n\x0erequests.proto\x1a\x0c\x63ommon.proto\"(\n\x0f\x43\x61talogsRequest\x12\x15\n\rconnection_id\x18\x01
 \x01(\t\"0\n\x17\x44\x61tabasePropertyRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\"P\n\x0eSchemasRequest\x12\x0f\n\x07\x63\x61talog\x18\x01 
\x01(\t\x12\x16\n\x0eschema_pattern\x18\x02 
\x01(\t\x12\x15\n\rconnection_id\x18\x03 
\x01(\t\"\x95\x01\n\rTablesRequest\x12\x0f\n\x07\x63\x61talog\x18\x01 
\x01(\t\x12\x16\n\x0eschema_pattern\x18\x02 
\x01(\t\x12\x1a\n\x12table_name_pattern\x18\x03 
\x01(\t\x12\x11\n\ttype_list\x18\x04 \x03(\t\x12\x15\n\rhas_type_list\x18\x06 
\x01(\x08\x12\x15\n\rconnection_id\x18\x07 
\x01(\t\"*\n\x11TableTypesRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\"\x89\x01\n\x0e\x43olumnsRequest\x12\x0f\n\x07\x63\x61talog\x18\x01 
\x01(\t\x12\x16\n\x0eschema_pattern\x18\x02 
\x01(\t\x12\x1a\n\x12table_name_pattern\x18\x03 
\x01(\t\x12\x1b\n\x13\x63olumn_name_pattern\x18\x04 
\x01(\t\x12\x15\n\rconnection_id\x18\x05 \x01(\t\"(\n\x0fTypeInfoReque
 st\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\"\xa1\x01\n\x18PrepareAndExecuteRequest\x12\x15\n\rconnection_id\x18\x01
 \x01(\t\x12\x0b\n\x03sql\x18\x02 \x01(\t\x12\x15\n\rmax_row_count\x18\x03 
\x01(\x04\x12\x14\n\x0cstatement_id\x18\x04 
\x01(\r\x12\x16\n\x0emax_rows_total\x18\x05 
\x01(\x03\x12\x1c\n\x14\x66irst_frame_max_size\x18\x06 
\x01(\x05\"c\n\x0ePrepareRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\x12\x0b\n\x03sql\x18\x02 \x01(\t\x12\x15\n\rmax_row_count\x18\x03 
\x01(\x04\x12\x16\n\x0emax_rows_total\x18\x04 
\x01(\x03\"\x80\x01\n\x0c\x46\x65tchRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\x12\x14\n\x0cstatement_id\x18\x02 \x01(\r\x12\x0e\n\x06offset\x18\x03 
\x01(\x04\x12\x1b\n\x13\x66\x65tch_max_row_count\x18\x04 
\x01(\r\x12\x16\n\x0e\x66rame_max_size\x18\x05 
\x01(\x05\"/\n\x16\x43reateStatementRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\"D\n\x15\x43loseStatementRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\x12\x14\n\x0cstatement_id\x18\x02 \x01(\r\"\x8b\x01\n\x15Op
 enConnectionRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\x12.\n\x04info\x18\x02 \x03(\x0b\x32 
.OpenConnectionRequest.InfoEntry\x1a+\n\tInfoEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\"/\n\x16\x43loseConnectionRequest\x12\x15\n\rconnection_id\x18\x01
 \x01(\t\"Y\n\x15\x43onnectionSyncRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\x12)\n\nconn_props\x18\x02 
\x01(\x0b\x32\x15.ConnectionProperties\"\xc7\x01\n\x0e\x45xecuteRequest\x12)\n\x0fstatementHandle\x18\x01
 \x01(\x0b\x32\x10.StatementHandle\x12%\n\x10parameter_values\x18\x02 
\x03(\x0b\x32\x0b.TypedValue\x12\'\n\x1f\x64\x65precated_first_frame_max_size\x18\x03
 \x01(\x04\x12\x1c\n\x14has_parameter_values\x18\x04 
\x01(\x08\x12\x1c\n\x14\x66irst_frame_max_size\x18\x05 
\x01(\x05\"m\n\x12SyncResultsRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\x12\x14\n\x0cstatement_id\x18\x02 \x01(\r\x12\x1a\n\x05state\x18\x03 
\x01(\x0b\x32\x0b.QueryState\x12\x0e\n\x06offset\x18\x04 \x01(\x04\"&\n\rCommi
 tRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\"(\n\x0fRollbackRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\"b\n\x1dPrepareAndExecuteBatchRequest\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\x12\x14\n\x0cstatement_id\x18\x02 
\x01(\r\x12\x14\n\x0csql_commands\x18\x03 
\x03(\t\"4\n\x0bUpdateBatch\x12%\n\x10parameter_values\x18\x01 
\x03(\x0b\x32\x0b.TypedValue\"a\n\x13\x45xecuteBatchRequest\x12\x15\n\rconnection_id\x18\x01
 \x01(\t\x12\x14\n\x0cstatement_id\x18\x02 \x01(\r\x12\x1d\n\x07updates\x18\x03 
\x03(\x0b\x32\x0c.UpdateBatchB\"\n org.apache.calcite.avatica.protob\x06proto3')
-  ,
-  dependencies=[common__pb2.DESCRIPTOR,])
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-
-
-
-_CATALOGSREQUEST = _descriptor.Descriptor(
-  name='CatalogsRequest',
-  full_name='CatalogsRequest',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,

[32/51] [abbrv] phoenix git commit: PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/phoenixdb/phoenixdb/avatica/proto/common_pb2.py
--
diff --git a/python/phoenixdb/phoenixdb/avatica/proto/common_pb2.py 
b/python/phoenixdb/phoenixdb/avatica/proto/common_pb2.py
new file mode 100644
index 000..3c99502
--- /dev/null
+++ b/python/phoenixdb/phoenixdb/avatica/proto/common_pb2.py
@@ -0,0 +1,1667 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: common.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='common.proto',
+  package='',
+  syntax='proto3',
+  
serialized_pb=_b('\n\x0c\x63ommon.proto\"\xc0\x01\n\x14\x43onnectionProperties\x12\x10\n\x08is_dirty\x18\x01
 \x01(\x08\x12\x13\n\x0b\x61uto_commit\x18\x02 
\x01(\x08\x12\x17\n\x0fhas_auto_commit\x18\x07 
\x01(\x08\x12\x11\n\tread_only\x18\x03 
\x01(\x08\x12\x15\n\rhas_read_only\x18\x08 
\x01(\x08\x12\x1d\n\x15transaction_isolation\x18\x04 
\x01(\r\x12\x0f\n\x07\x63\x61talog\x18\x05 \x01(\t\x12\x0e\n\x06schema\x18\x06 
\x01(\t\"S\n\x0fStatementHandle\x12\x15\n\rconnection_id\x18\x01 
\x01(\t\x12\n\n\x02id\x18\x02 \x01(\r\x12\x1d\n\tsignature\x18\x03 
\x01(\x0b\x32\n.Signature\"\xb0\x01\n\tSignature\x12 \n\x07\x63olumns\x18\x01 
\x03(\x0b\x32\x0f.ColumnMetaData\x12\x0b\n\x03sql\x18\x02 
\x01(\t\x12%\n\nparameters\x18\x03 
\x03(\x0b\x32\x11.AvaticaParameter\x12&\n\x0e\x63ursor_factory\x18\x04 
\x01(\x0b\x32\x0e.CursorFactory\x12%\n\rstatementType\x18\x05 
\x01(\x0e\x32\x0e.StatementType\"\xad\x03\n\x0e\x43olumnMetaData\x12\x0f\n\x07ordinal\x18\x01
 \x01(\r\x12\x16\n\x0e\x61uto_increment\x18\x02 \x
 01(\x08\x12\x16\n\x0e\x63\x61se_sensitive\x18\x03 
\x01(\x08\x12\x12\n\nsearchable\x18\x04 
\x01(\x08\x12\x10\n\x08\x63urrency\x18\x05 
\x01(\x08\x12\x10\n\x08nullable\x18\x06 \x01(\r\x12\x0e\n\x06signed\x18\x07 
\x01(\x08\x12\x14\n\x0c\x64isplay_size\x18\x08 \x01(\r\x12\r\n\x05label\x18\t 
\x01(\t\x12\x13\n\x0b\x63olumn_name\x18\n 
\x01(\t\x12\x13\n\x0bschema_name\x18\x0b \x01(\t\x12\x11\n\tprecision\x18\x0c 
\x01(\r\x12\r\n\x05scale\x18\r \x01(\r\x12\x12\n\ntable_name\x18\x0e 
\x01(\t\x12\x14\n\x0c\x63\x61talog_name\x18\x0f 
\x01(\t\x12\x11\n\tread_only\x18\x10 \x01(\x08\x12\x10\n\x08writable\x18\x11 
\x01(\x08\x12\x1b\n\x13\x64\x65\x66initely_writable\x18\x12 
\x01(\x08\x12\x19\n\x11\x63olumn_class_name\x18\x13 
\x01(\t\x12\x1a\n\x04type\x18\x14 
\x01(\x0b\x32\x0c.AvaticaType\"}\n\x0b\x41vaticaType\x12\n\n\x02id\x18\x01 
\x01(\r\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x11\n\x03rep\x18\x03 
\x01(\x0e\x32\x04.Rep\x12 \n\x07\x63olumns\x18\x04 
\x03(\x0b\x32\x0f.ColumnMetaData\x12\x1f\n\tcomponent\x18
 \x05 
\x01(\x0b\x32\x0c.AvaticaType\"\x91\x01\n\x10\x41vaticaParameter\x12\x0e\n\x06signed\x18\x01
 \x01(\x08\x12\x11\n\tprecision\x18\x02 \x01(\r\x12\r\n\x05scale\x18\x03 
\x01(\r\x12\x16\n\x0eparameter_type\x18\x04 
\x01(\r\x12\x11\n\ttype_name\x18\x05 \x01(\t\x12\x12\n\nclass_name\x18\x06 
\x01(\t\x12\x0c\n\x04name\x18\x07 
\x01(\t\"\xb3\x01\n\rCursorFactory\x12#\n\x05style\x18\x01 
\x01(\x0e\x32\x14.CursorFactory.Style\x12\x12\n\nclass_name\x18\x02 
\x01(\t\x12\x13\n\x0b\x66ield_names\x18\x03 
\x03(\t\"T\n\x05Style\x12\n\n\x06OBJECT\x10\x00\x12\n\n\x06RECORD\x10\x01\x12\x15\n\x11RECORD_PROJECTION\x10\x02\x12\t\n\x05\x41RRAY\x10\x03\x12\x08\n\x04LIST\x10\x04\x12\x07\n\x03MAP\x10\x05\"9\n\x05\x46rame\x12\x0e\n\x06offset\x18\x01
 \x01(\x04\x12\x0c\n\x04\x64one\x18\x02 \x01(\x08\x12\x12\n\x04rows\x18\x03 
\x03(\x0b\x32\x04.Row\"\"\n\x03Row\x12\x1b\n\x05value\x18\x01 
\x03(\x0b\x32\x0c.ColumnValue\"3\n\x10\x44\x61tabaseProperty\x12\x0c\n\x04name\x18\x01
 \x01(\t\x12\x11\n\tfunctions\x18\x02 \x03(
 \t\"4\n\x0bWireMessage\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12\x17\n\x0fwrapped_message\x18\x02 
\x01(\x0c\"\x87\x01\n\x0b\x43olumnValue\x12\x1a\n\x05value\x18\x01 
\x03(\x0b\x32\x0b.TypedValue\x12 \n\x0b\x61rray_value\x18\x02 
\x03(\x0b\x32\x0b.TypedValue\x12\x17\n\x0fhas_array_value\x18\x03 
\x01(\x08\x12!\n\x0cscalar_value\x18\x04 
\x01(\x0b\x32\x0b.TypedValue\"\xf2\x01\n\nTypedValue\x12\x12\n\x04type\x18\x01 
\x01(\x0e\x32\x04.Rep\x12\x12\n\nbool_value\x18\x02 
\x01(\x08\x12\x14\n\x0cstring_value\x18\x03 
\x01(\t\x12\x14\n\x0cnumber_value\x18\x04 
\x01(\x12\x12\x13\n\x0b\x62ytes_value\x18\x05 
\x01(\x0c\x12\x14\n\x0c\x64ouble_value\x18\x06 
\x01(\x01\x12\x0c\n\x04null\x18\x07 \x01(\x08\x12 \n\x0b\x61rray_value\x18\x08 
\x03(\x0b\x32\x0b.TypedValue\x12\x1c\n\x0e\x63omponent_type\x18\t 

[45/51] [abbrv] phoenix git commit: PHOENIX-4785 Unable to write to table if index is made active during retry (addendum for test)

2018-10-17 Thread pboado
PHOENIX-4785 Unable to write to table if index is made active during retry 
(addendum for test)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f9cee604
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f9cee604
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f9cee604

Branch: refs/heads/4.x-cdh5.15
Commit: f9cee6043d96f146f7f36bab159570b084270490
Parents: 1cbd79d
Author: Vincent Poon 
Authored: Fri Oct 12 23:49:39 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../org/apache/phoenix/end2end/index/MutableIndexFailureIT.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f9cee604/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 9bf82fe..06f8f68 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -433,7 +433,7 @@ public class MutableIndexFailureIT extends BaseTest {
 
 private void addRowsInTableDuringRetry(final String tableName)
 throws SQLException, InterruptedException, ExecutionException {
-int threads=10;
+int threads=9;
 boolean wasFailWrite = FailingRegionObserver.FAIL_WRITE;
 boolean wasToggleFailWriteForRetry = 
FailingRegionObserver.TOGGLE_FAIL_WRITE_FOR_RETRY;
 try {
@@ -610,6 +610,9 @@ public class MutableIndexFailureIT extends BaseTest {
 }
 }
 }
+if (TOGGLE_FAIL_WRITE_FOR_RETRY) {
+FAIL_WRITE = !FAIL_WRITE;
+}
 }
 }
 if (throwException) {
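
For context, the toggle the addendum relies on is easiest to see in isolation;
a self-contained sketch of the same alternating fault-injection pattern (names
echo FailingRegionObserver's FAIL_WRITE and TOGGLE_FAIL_WRITE_FOR_RETRY; the
coprocessor wiring is omitted):

    /** Sketch: fail every other write attempt so the client's retry succeeds. */
    public class AlternatingFailureInjector {
        private boolean failWrite = true;
        private final boolean toggleForRetry = true;

        /** Returns true when the current attempt should be failed. */
        public synchronized boolean shouldFail() {
            boolean fail = failWrite;
            if (toggleForRetry) {
                failWrite = !failWrite;   // the retry takes the opposite path
            }
            return fail;
        }

        public static void main(String[] args) {
            AlternatingFailureInjector injector = new AlternatingFailureInjector();
            System.out.println(injector.shouldFail()); // true  -> first attempt fails
            System.out.println(injector.shouldFail()); // false -> retry succeeds
        }
    }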



[39/51] [abbrv] phoenix git commit: PHOENIX-4963 Do not throw when transitioning to PENDING_DISABLE if index is already disabled

2018-10-17 Thread pboado
PHOENIX-4963 Do not throw when transitioning to PENDING_DISABLE if index is 
already disabled


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1cbd79d5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1cbd79d5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1cbd79d5

Branch: refs/heads/4.x-cdh5.15
Commit: 1cbd79d520005d39b6d4376d8a0d6401d28cd573
Parents: 62c67d6
Author: Vincent Poon 
Authored: Wed Oct 17 00:13:11 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../phoenix/coprocessor/MetaDataEndpointImpl.java| 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1cbd79d5/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 83c7f4d..21370f3 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -3949,6 +3949,14 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 return;
 }
 } else if (currentState == PIndexState.DISABLE) {
+// Index already disabled, so can't revert to 
PENDING_DISABLE
+if (newState == PIndexState.PENDING_DISABLE) {
+// returning TABLE_ALREADY_EXISTS here means the 
client doesn't throw an exception
+
builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
+
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
+done.run(builder.build());
+return;
+}
 // Can't transition back to INACTIVE if 
INDEX_DISABLE_TIMESTAMP is 0
 if (newState != PIndexState.BUILDING && newState != 
PIndexState.DISABLE &&
 (newState != PIndexState.INACTIVE || curTimeStampVal 
== 0)) {
@@ -3961,13 +3969,6 @@ public class MetaDataEndpointImpl extends 
MetaDataProtocol implements Coprocesso
 if (newState == PIndexState.ACTIVE) {
 newState = PIndexState.DISABLE;
 }
-// Can't transition from DISABLE to PENDING_DISABLE
-if (newState == PIndexState.PENDING_DISABLE) {
-
builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
-
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
-done.run(builder.build());
-return;
-}
 }
 if (newState == PIndexState.PENDING_DISABLE && currentState != 
PIndexState.PENDING_DISABLE) {
 // reset count for first PENDING_DISABLE
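
Read as a state-machine rule, the change is small; a self-contained sketch of
the new guard (enum values follow PIndexState, everything else is simplified
for illustration):

    /** Sketch (simplified names): which PENDING_DISABLE requests become no-ops. */
    enum IndexState { BUILDING, ACTIVE, INACTIVE, DISABLE, PENDING_DISABLE }

    final class IndexStateRule {
        /** After this change, DISABLE -> PENDING_DISABLE answers "already done"
         *  (TABLE_ALREADY_EXISTS) instead of UNALLOWED_TABLE_MUTATION, so a
         *  retrying client no longer throws. */
        static boolean isNoOpTransition(IndexState current, IndexState requested) {
            return current == IndexState.DISABLE && requested == IndexState.PENDING_DISABLE;
        }

        public static void main(String[] args) {
            System.out.println(isNoOpTransition(IndexState.DISABLE, IndexState.PENDING_DISABLE)); // true
            System.out.println(isNoOpTransition(IndexState.ACTIVE, IndexState.PENDING_DISABLE));  // false
        }
    }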



[12/51] [abbrv] phoenix git commit: PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for SYSTEM.MUTEX table

2018-10-17 Thread pboado
PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for 
SYSTEM.MUTEX table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ec91f62a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ec91f62a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ec91f62a

Branch: refs/heads/4.x-cdh5.15
Commit: ec91f62ac4aea6d82f9c315fbf0e7b6e3e6b513b
Parents: 3ace797
Author: Ankit Singhal 
Authored: Tue Oct 2 20:12:07 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../query/ConnectionQueryServicesImpl.java  | 47 +---
 1 file changed, 31 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ec91f62a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 9ee33a5..39ad967 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -71,6 +71,7 @@ import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
 import java.sql.Types;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -2567,22 +2568,12 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean foundAccessDeniedException = false;
 // when running spark/map reduce jobs the 
ADE might be wrapped
 // in a RemoteException
-for (Throwable t : 
Throwables.getCausalChain(e)) {
-if (t instanceof AccessDeniedException
-|| (t instanceof 
RemoteException
-&& ((RemoteException) 
t).getClassName()
-
.equals(AccessDeniedException.class
-
.getName()))) {
-foundAccessDeniedException = true;
-break;
-}
-}
-if (foundAccessDeniedException) {
+if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(AccessDeniedException.class))) {
 // Pass
 logger.warn("Could not check for 
Phoenix SYSTEM tables, assuming they exist and are properly configured");
 
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES,
 getProps()).getName());
 success = true;
-} else if 
(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
NamespaceNotFoundException.class))) {
+} else if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(NamespaceNotFoundException.class))) {
 // This exception is only possible if 
SYSTEM namespace mapping is enabled and SYSTEM namespace is missing
 // It implies that SYSTEM tables are 
not created and hence we shouldn't provide a connection
 AccessDeniedException ade = new 
AccessDeniedException("Insufficient permissions to create SYSTEM namespace and 
SYSTEM Tables");
@@ -2677,15 +2668,39 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 columnDesc.setTimeToLive(TTL_FOR_MUTEX); // Let mutex expire after 
some time
 tableDesc.addFamily(columnDesc);
 admin.createTable(tableDesc);
-} catch (IOException e) {
-
if(!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
AccessDeniedException.class)) ||
-
!Iterables.isEmpty(Iterables.filter(Throwables.getCausalChain(e), 
org.apache.hadoop.hbase.TableNotFoundException.class))) {
-// Ignore
+}
+catch (IOException e) {
+if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends IOException>> asList(
+AccessDeniedException.class, 
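
The unwrapping idiom this commit centralizes can be shown stand-alone; a
minimal sketch, assuming only Guava and the Hadoop IPC classes already on
Phoenix's classpath (the real helper is the inspectIfAnyExceptionInChain
method added above):

    import java.util.List;
    import com.google.common.base.Throwables;
    import org.apache.hadoop.ipc.RemoteException;

    final class CausalChainInspector {
        /** True if any cause is (or, as a RemoteException, names) one of the types. */
        static boolean anyInChain(Throwable root, List<Class<? extends Exception>> types) {
            for (Throwable t : Throwables.getCausalChain(root)) {
                for (Class<? extends Exception> type : types) {
                    if (type.isInstance(t)) {
                        return true;
                    }
                    // Spark/MR clients often see only the class name of the
                    // server-side exception, wrapped in a RemoteException.
                    if (t instanceof RemoteException
                            && ((RemoteException) t).getClassName().equals(type.getName())) {
                        return true;
                    }
                }
            }
            return false;
        }
    }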

[31/51] [abbrv] phoenix git commit: PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/phoenixdb/phoenixdb/avatica/proto/requests_pb2.py
--
diff --git a/python/phoenixdb/phoenixdb/avatica/proto/requests_pb2.py 
b/python/phoenixdb/phoenixdb/avatica/proto/requests_pb2.py
new file mode 100644
index 000..203f945
--- /dev/null
+++ b/python/phoenixdb/phoenixdb/avatica/proto/requests_pb2.py
@@ -0,0 +1,1206 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: requests.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from . import common_pb2 as common__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='requests.proto',
+  package='',
+  syntax='proto3',
+  serialized_pb=_b('\n\x0erequests.proto\x1a\x0c\x63ommon.proto[...serialized descriptor bytes for the Avatica request messages truncated...]')
+  ,
+  dependencies=[common__pb2.DESCRIPTOR,])
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+
+_CATALOGSREQUEST = _descriptor.Descriptor(
+  name='CatalogsRequest',
+  full_name='CatalogsRequest',
+  filename=None,
+  

[15/51] [abbrv] phoenix git commit: PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for SYSTEM.MUTEX table(addendum)

2018-10-17 Thread pboado
PHOENIX-4941 Handle TableExistsException when wrapped under RemoteException for 
SYSTEM.MUTEX table(addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/87747449
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/87747449
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/87747449

Branch: refs/heads/4.x-cdh5.15
Commit: 877474490a031d55449dfda7be79792043e4e418
Parents: ec91f62
Author: Ankit Singhal 
Authored: Tue Oct 2 20:29:04 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../phoenix/query/ConnectionQueryServicesImpl.java   | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/87747449/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 39ad967..f78db9d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2568,12 +2568,15 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 boolean foundAccessDeniedException = false;
 // when running spark/map reduce jobs the 
ADE might be wrapped
 // in a RemoteException
-if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(AccessDeniedException.class))) {
+if (inspectIfAnyExceptionInChain(e, 
Collections
+.<Class<? extends Exception>> 
singletonList(AccessDeniedException.class))) {
 // Pass
 logger.warn("Could not check for 
Phoenix SYSTEM tables, assuming they exist and are properly configured");
 
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES,
 getProps()).getName());
 success = true;
-} else if (inspectIfAnyExceptionInChain(e, 
Collections.singletonList(NamespaceNotFoundException.class))) {
+} else if (inspectIfAnyExceptionInChain(e,
+Collections.<Class<? extends Exception>> singletonList(
+
NamespaceNotFoundException.class))) {
 // This exception is only possible if 
SYSTEM namespace mapping is enabled and SYSTEM namespace is missing
 // It implies that SYSTEM tables are 
not created and hence we shouldn't provide a connection
 AccessDeniedException ade = new 
AccessDeniedException("Insufficient permissions to create SYSTEM namespace and 
SYSTEM Tables");
@@ -2670,7 +2673,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 admin.createTable(tableDesc);
 }
 catch (IOException e) {
-if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends IOException>> asList(
+if (inspectIfAnyExceptionInChain(e, Arrays.<Class<? extends Exception>> asList(
 AccessDeniedException.class, 
org.apache.hadoop.hbase.TableExistsException.class))) {
 // Ignore TableExistsException as another client might beat us 
during upgrade.
 // Ignore AccessDeniedException, as it may be possible 
underpriviliged user trying to use the connection
@@ -2683,10 +2686,10 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 }
 }
 
-private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends IOException>> ioList) {
+private boolean inspectIfAnyExceptionInChain(Throwable io, List<Class<? extends Exception>> ioList) {
 boolean exceptionToIgnore = false;
 for (Throwable t : Throwables.getCausalChain(io)) {
-for (Class<? extends IOException> exception : ioList) {
+for (Class<? extends Exception> exception : ioList) {
 exceptionToIgnore |= isExceptionInstanceOf(t, exception);
 }
 if (exceptionToIgnore) {
@@ -2697,7 +2700,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 return exceptionToIgnore;
 }
 
-private boolean isExceptionInstanceOf(Throwable io, Class<? extends IOException> exception) {
+private boolean isExceptionInstanceOf(Throwable io, Class<? extends Exception> exception) {
 return 

[13/51] [abbrv] phoenix git commit: PHOENIX-4519 - Index rebuild MR jobs not created for "alter index rebuild async" rebuilds

2018-10-17 Thread pboado
PHOENIX-4519 - Index rebuild MR jobs not created for "alter index rebuild 
async" rebuilds


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3ace7979
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3ace7979
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3ace7979

Branch: refs/heads/4.x-cdh5.15
Commit: 3ace7979b8ecbd1f30bcf34fd6086e25c7305b84
Parents: 3dcd586
Author: Geoffrey 
Authored: Fri Sep 7 00:18:09 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../end2end/index/PhoenixMRJobSubmitterIT.java  | 113 +++
 .../index/automation/PhoenixMRJobSubmitter.java |  16 ++-
 .../apache/phoenix/schema/MetaDataClient.java   |   2 +-
 3 files changed, 126 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3ace7979/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PhoenixMRJobSubmitterIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PhoenixMRJobSubmitterIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PhoenixMRJobSubmitterIT.java
new file mode 100644
index 000..7cc3aa0
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PhoenixMRJobSubmitterIT.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.mapreduce.index.automation.PhoenixAsyncIndex;
+import org.apache.phoenix.mapreduce.index.automation.PhoenixMRJobSubmitter;
+import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.RunUntilFailure;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.util.Map;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class PhoenixMRJobSubmitterIT extends BaseUniqueNamesOwnClusterIT {
+
+  private static String REQUEST_INDEX_REBUILD_SQL = "ALTER INDEX %s ON %s 
REBUILD ASYNC";
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+setUpTestDriver(ReadOnlyProps.EMPTY_PROPS);
+
+  }
+
+  @Test
+  public void testGetCandidateJobs() throws Exception {
+String tableName = "TBL_" + generateUniqueName();
+String asyncIndexName = "IDX_" + generateUniqueName();
+String needsRebuildIndexName = "IDX_" + generateUniqueName();
+String tableDDL = "CREATE TABLE " + tableName + TestUtil.TEST_TABLE_SCHEMA;
+String asyncIndexDDL = "CREATE INDEX " + asyncIndexName + " ON " + 
tableName + " (a.varchar_col1) ASYNC";
+String needsRebuildIndexDDL = "CREATE INDEX " + needsRebuildIndexName + " 
ON " + tableName + " (a.char_col1)";
+long rebuildTimestamp = 100L;
+
+createTestTable(getUrl(), tableDDL);
+
+createTestTable(getUrl(), needsRebuildIndexDDL);
+Connection conn = null;
+PreparedStatement stmt = null;
+try {
+  conn = DriverManager.getConnection(getUrl());
+  TestUtil.assertIndexState(conn, needsRebuildIndexName, 
PIndexState.ACTIVE, 0L);
+
+  //first make sure that we don't return an active index
+  PhoenixMRJobSubmitter submitter = new 
PhoenixMRJobSubmitter(getUtility().getConfiguration());
+  Map<String, PhoenixAsyncIndex> candidateMap = 
submitter.getCandidateJobs(conn);
+  Assert.assertNotNull(candidateMap);
+  Assert.assertEquals(0, candidateMap.size());
+
+  //create an index with ASYNC that will need building via MapReduce
+  createTestTable(getUrl(), asyncIndexDDL);
+  TestUtil.assertIndexState(conn, asyncIndexName, PIndexState.BUILDING, 
0L);
+
+
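
The workflow the new IT drives end to end can be condensed into a short,
hedged sketch (API names are the ones the test imports; the JDBC URL and the
index/table names are placeholders, and configuration handling is simplified):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.phoenix.mapreduce.index.automation.PhoenixAsyncIndex;
    import org.apache.phoenix.mapreduce.index.automation.PhoenixMRJobSubmitter;

    public class AsyncRebuildSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // Only flags the index; the actual rebuild happens in MapReduce.
                conn.createStatement().execute("ALTER INDEX IDX ON TBL REBUILD ASYNC");

                // The submitter scans for flagged indexes and turns each one into
                // an IndexTool MR job; this fix makes ALTER ... REBUILD ASYNC
                // indexes show up here, not just CREATE INDEX ... ASYNC ones.
                PhoenixMRJobSubmitter submitter =
                        new PhoenixMRJobSubmitter(new Configuration());
                Map<String, PhoenixAsyncIndex> candidates = submitter.getCandidateJobs(conn);
                System.out.println("indexes awaiting rebuild: " + candidates.keySet());
            }
        }
    }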

[29/51] [abbrv] phoenix git commit: PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/phoenixdb/phoenixdb/types.py
--
diff --git a/python/phoenixdb/phoenixdb/types.py 
b/python/phoenixdb/phoenixdb/types.py
new file mode 100644
index 000..f41355a
--- /dev/null
+++ b/python/phoenixdb/phoenixdb/types.py
@@ -0,0 +1,202 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import time
+import datetime
+from decimal import Decimal
+from phoenixdb.avatica.proto import common_pb2
+
+__all__ = [
+'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 
'TimestampFromTicks',
+'Binary', 'STRING', 'BINARY', 'NUMBER', 'DATETIME', 'ROWID', 'BOOLEAN',
+'JAVA_CLASSES', 'JAVA_CLASSES_MAP', 'TypeHelper',
+]
+
+
+def Date(year, month, day):
+"""Constructs an object holding a date value."""
+return datetime.date(year, month, day)
+
+
+def Time(hour, minute, second):
+"""Constructs an object holding a time value."""
+return datetime.time(hour, minute, second)
+
+
+def Timestamp(year, month, day, hour, minute, second):
+"""Constructs an object holding a datetime/timestamp value."""
+return datetime.datetime(year, month, day, hour, minute, second)
+
+
+def DateFromTicks(ticks):
+"""Constructs an object holding a date value from the given UNIX 
timestamp."""
+return Date(*time.localtime(ticks)[:3])
+
+
+def TimeFromTicks(ticks):
+"""Constructs an object holding a time value from the given UNIX 
timestamp."""
+return Time(*time.localtime(ticks)[3:6])
+
+
+def TimestampFromTicks(ticks):
+"""Constructs an object holding a datetime/timestamp value from the given 
UNIX timestamp."""
+return Timestamp(*time.localtime(ticks)[:6])
+
+
+def Binary(value):
+"""Constructs an object capable of holding a binary (long) string value."""
+return bytes(value)
+
+
+def time_from_java_sql_time(n):
+dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=n)
+return dt.time()
+
+
+def time_to_java_sql_time(t):
+return ((t.hour * 60 + t.minute) * 60 + t.second) * 1000 + t.microsecond 
// 1000
+
+
+def date_from_java_sql_date(n):
+return datetime.date(1970, 1, 1) + datetime.timedelta(days=n)
+
+
+def date_to_java_sql_date(d):
+if isinstance(d, datetime.datetime):
+d = d.date()
+td = d - datetime.date(1970, 1, 1)
+return td.days
+
+
+def datetime_from_java_sql_timestamp(n):
+return datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=n)
+
+
+def datetime_to_java_sql_timestamp(d):
+td = d - datetime.datetime(1970, 1, 1)
+return td.microseconds // 1000 + (td.seconds + td.days * 24 * 3600) * 1000
+
+
+class ColumnType(object):
+
+def __init__(self, eq_types):
+self.eq_types = tuple(eq_types)
+self.eq_types_set = set(eq_types)
+
+def __eq__(self, other):
+return other in self.eq_types_set
+
+def __cmp__(self, other):
+if other in self.eq_types_set:
+return 0
+if other < self.eq_types:
+return 1
+else:
+return -1
+
+
+STRING = ColumnType(['VARCHAR', 'CHAR'])
+"""Type object that can be used to describe string-based columns."""
+
+BINARY = ColumnType(['BINARY', 'VARBINARY'])
+"""Type object that can be used to describe (long) binary columns."""
+
+NUMBER = ColumnType([
+'INTEGER', 'UNSIGNED_INT', 'BIGINT', 'UNSIGNED_LONG', 'TINYINT', 
'UNSIGNED_TINYINT',
+'SMALLINT', 'UNSIGNED_SMALLINT', 'FLOAT', 'UNSIGNED_FLOAT', 'DOUBLE', 
'UNSIGNED_DOUBLE', 'DECIMAL'
+])
+"""Type object that can be used to describe numeric columns."""
+
+DATETIME = ColumnType(['TIME', 'DATE', 'TIMESTAMP', 'UNSIGNED_TIME', 
'UNSIGNED_DATE', 'UNSIGNED_TIMESTAMP'])
+"""Type object that can be used to describe date/time columns."""
+
+ROWID = ColumnType([])
+"""Only implemented for DB API 2.0 compatibility, not used."""
+
+BOOLEAN = ColumnType(['BOOLEAN'])
+"""Type object that can be used to describe boolean columns. This is a 
phoenixdb-specific extension."""
+
+
+# XXX ARRAY
+
+if sys.version_info[0] < 3:
+_long = long  # noqa: F821
+else:
+_long = int
+
+JAVA_CLASSES = {
+'bool_value': [
+('java.lang.Boolean', 

[30/51] [abbrv] phoenix git commit: PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/phoenixdb/phoenixdb/cursor.py
--
diff --git a/python/phoenixdb/phoenixdb/cursor.py 
b/python/phoenixdb/phoenixdb/cursor.py
new file mode 100644
index 000..8be7bed
--- /dev/null
+++ b/python/phoenixdb/phoenixdb/cursor.py
@@ -0,0 +1,347 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import collections
+from phoenixdb.types import TypeHelper
+from phoenixdb.errors import ProgrammingError, InternalError
+from phoenixdb.avatica.proto import common_pb2
+
+__all__ = ['Cursor', 'ColumnDescription', 'DictCursor']
+
+logger = logging.getLogger(__name__)
+
+# TODO see note in Cursor.rowcount()
+MAX_INT = 2 ** 64 - 1
+
+ColumnDescription = collections.namedtuple('ColumnDescription', 'name 
type_code display_size internal_size precision scale null_ok')
+"""Named tuple for representing results from :attr:`Cursor.description`."""
+
+
+class Cursor(object):
+"""Database cursor for executing queries and iterating over results.
+
+You should not construct this object manually, use 
:meth:`Connection.cursor() <phoenixdb.connection.Connection.cursor>` instead.
+"""
+
+arraysize = 1
+"""
+Read/write attribute specifying the number of rows to fetch
+at a time with :meth:`fetchmany`. It defaults to 1 meaning to
+fetch a single row at a time.
+"""
+
+itersize = 2000
+"""
+Read/write attribute specifying the number of rows to fetch
+from the backend at each network roundtrip during iteration
+on the cursor. The default is 2000.
+"""
+
+def __init__(self, connection, id=None):
+self._connection = connection
+self._id = id
+self._signature = None
+self._column_data_types = []
+self._frame = None
+self._pos = None
+self._closed = False
+self.arraysize = self.__class__.arraysize
+self.itersize = self.__class__.itersize
+self._updatecount = -1
+
+def __del__(self):
+if not self._connection._closed and not self._closed:
+self.close()
+
+def __enter__(self):
+return self
+
+def __exit__(self, exc_type, exc_value, traceback):
+if not self._closed:
+self.close()
+
+def __iter__(self):
+return self
+
+def __next__(self):
+row = self.fetchone()
+if row is None:
+raise StopIteration
+return row
+
+next = __next__
+
+def close(self):
+"""Closes the cursor.
+No further operations are allowed once the cursor is closed.
+
+If the cursor is used in a ``with`` statement, this method will
+be automatically called at the end of the ``with`` block.
+"""
+if self._closed:
+raise ProgrammingError('the cursor is already closed')
+if self._id is not None:
+self._connection._client.close_statement(self._connection._id, 
self._id)
+self._id = None
+self._signature = None
+self._column_data_types = []
+self._frame = None
+self._pos = None
+self._closed = True
+
+@property
+def closed(self):
+"""Read-only attribute specifying if the cursor is closed or not."""
+return self._closed
+
+@property
+def description(self):
+if self._signature is None:
+return None
+description = []
+for column in self._signature.columns:
+description.append(ColumnDescription(
+column.column_name,
+column.type.name,
+column.display_size,
+None,
+column.precision,
+column.scale,
+None if column.nullable == 2 else bool(column.nullable),
+))
+return description
+
+def _set_id(self, id):
+if self._id is not None and self._id != id:
+self._connection._client.close_statement(self._connection._id, 
self._id)
+self._id = id
+
+def _set_signature(self, signature):
+self._signature = signature
+self._column_data_types = []
+self._parameter_data_types = []
+if signature is 

[27/51] [abbrv] phoenix git commit: PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/requests-kerberos/tests/test_requests_kerberos.py
--
diff --git a/python/requests-kerberos/tests/test_requests_kerberos.py 
b/python/requests-kerberos/tests/test_requests_kerberos.py
new file mode 100644
index 000..ebaca37
--- /dev/null
+++ b/python/requests-kerberos/tests/test_requests_kerberos.py
@@ -0,0 +1,904 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""Tests for requests_kerberos."""
+
+import base64
+from mock import Mock, patch
+from requests.compat import urlparse
+import requests
+import warnings
+
+
+try:
+import kerberos
+kerberos_module_name='kerberos'
+except ImportError:
+import winkerberos as kerberos  # On Windows
+kerberos_module_name = 'winkerberos'
+
+import requests_kerberos
+import unittest
+from requests_kerberos.kerberos_ import _get_certificate_hash
+
+# kerberos.authClientInit() is called with the service name (HTTP@FQDN) and
+# returns 1 and a kerberos context object on success. Returns -1 on failure.
+clientInit_complete = Mock(return_value=(1, "CTX"))
+clientInit_error = Mock(return_value=(-1, "CTX"))
+
+# kerberos.authGSSClientStep() is called with the kerberos context object
+# returned by authGSSClientInit and the negotiate auth token provided in the
+# http response's www-authenticate header. It returns 0 or 1 on success. 0
+# Indicates that authentication is progressing but not complete.
+clientStep_complete = Mock(return_value=1)
+clientStep_continue = Mock(return_value=0)
+clientStep_error = Mock(return_value=-1)
+clientStep_exception = Mock(side_effect=kerberos.GSSError)
+
+# kerberos.authGSSCLientResponse() is called with the kerberos context which
+# was initially returned by authGSSClientInit and had been mutated by a call by
+# authGSSClientStep. It returns a string.
+clientResponse = Mock(return_value="GSSRESPONSE")
+
+# Note: we're not using the @mock.patch decorator:
+# > My only word of warning is that in the past, the patch decorator hides
+# > tests when using the standard unittest library.
+# > -- sigmavirus24 in https://github.com/requests/requests-kerberos/issues/1
+
+
+class KerberosTestCase(unittest.TestCase):
+
+def setUp(self):
+"""Setup."""
+clientInit_complete.reset_mock()
+clientInit_error.reset_mock()
+clientStep_complete.reset_mock()
+clientStep_continue.reset_mock()
+clientStep_error.reset_mock()
+clientStep_exception.reset_mock()
+clientResponse.reset_mock()
+
+def tearDown(self):
+"""Teardown."""
+pass
+
+def test_negotate_value_extraction(self):
+response = requests.Response()
+response.headers = {'www-authenticate': 'negotiate token'}
+self.assertEqual(
+requests_kerberos.kerberos_._negotiate_value(response),
+'token'
+)
+
+def test_negotate_value_extraction_none(self):
+response = requests.Response()
+response.headers = {}
+self.assertTrue(
+requests_kerberos.kerberos_._negotiate_value(response) is None
+)
+
+def test_force_preemptive(self):
+with patch.multiple(kerberos_module_name,
+authGSSClientInit=clientInit_complete,
+authGSSClientResponse=clientResponse,
+authGSSClientStep=clientStep_continue):
+auth = requests_kerberos.HTTPKerberosAuth(force_preemptive=True)
+
request = requests.Request(url="http://www.example.org")
+
+auth.__call__(request)
+
+self.assertTrue('Authorization' in request.headers)
+self.assertEqual(request.headers.get('Authorization'), 'Negotiate 
GSSRESPONSE')
+
+def test_no_force_preemptive(self):
+with patch.multiple(kerberos_module_name,
+authGSSClientInit=clientInit_complete,
+authGSSClientResponse=clientResponse,
+authGSSClientStep=clientStep_continue):
+auth = requests_kerberos.HTTPKerberosAuth()
+
request = requests.Request(url="http://www.example.org")
+
+auth.__call__(request)
+
+self.assertTrue('Authorization' not in request.headers)
+
+def test_generate_request_header(self):
+with patch.multiple(kerberos_module_name,
+authGSSClientInit=clientInit_complete,
+authGSSClientResponse=clientResponse,
+authGSSClientStep=clientStep_continue):
+response = requests.Response()
+response.url = "http://www.example.org/;
+response.headers = {'www-authenticate': 'negotiate token'}
+host = urlparse(response.url).hostname
+auth = requests_kerberos.HTTPKerberosAuth()
+self.assertEqual(
+

[47/51] [abbrv] phoenix git commit: PHOENIX-4855 Continue to write base table column metadata when creating a view in order to support rollback (addendum)

2018-10-17 Thread pboado
PHOENIX-4855 Continue to write base table column metadata when creating a view 
in order to support rollback (addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/00ba63b5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/00ba63b5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/00ba63b5

Branch: refs/heads/4.x-cdh5.15
Commit: 00ba63b5ab575365013bb15990fdda045a18d63c
Parents: 708a788
Author: Thomas D'Silva 
Authored: Sat Oct 6 20:40:54 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/00ba63b5/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 18c9000..52dfe99 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -737,9 +737,10 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 boolean isSalted = table.getBucketNum()!=null;
 boolean tenantColSkipped = false;
 List columns = table.getColumns();
-columns = Lists.newArrayList(columns.subList(isSalted ? 1 : 0, 
columns.size()));
+int startOffset = isSalted ? 1 : 0;
+   columns = 
Lists.newArrayList(columns.subList(startOffset, columns.size()));
 for (PColumn column : columns) {
-if (isTenantSpecificConnection && 
column.equals(table.getPKColumns().get(0))) {
+if (isTenantSpecificConnection && 
column.equals(table.getPKColumns().get(startOffset))) {
 // skip the tenant column
 tenantColSkipped = true;
 continue;
@@ -874,7 +875,7 @@ public class PhoenixDatabaseMetaData implements 
DatabaseMetaData {
 byte[] keySeqBytes = ByteUtil.EMPTY_BYTE_ARRAY;
 int pkPos = table.getPKColumns().indexOf(column);
 if (pkPos!=-1) {
-short keySeq = (short) (pkPos + 1 - (isSalted ? 1 : 0) - 
(tenantColSkipped ? 1 : 0));
+short keySeq = (short) (pkPos + 1 - startOffset - 
(tenantColSkipped ? 1 : 0));
 keySeqBytes = PSmallint.INSTANCE.toBytes(keySeq);
 }
 cells.add(KeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, 
KEY_SEQ_BYTES,
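
The KEY_SEQ arithmetic is easiest to check with concrete numbers; a
self-contained sketch of the formula used above (the salt-byte and
tenant-column conventions are Phoenix's, the class itself is illustrative):

    /** Sketch: 1-based KEY_SEQ of a PK column as reported to JDBC metadata. */
    final class KeySeqSketch {
        static short keySeq(int pkPos, boolean isSalted, boolean tenantColSkipped) {
            int startOffset = isSalted ? 1 : 0;               // salt byte occupies PK slot 0
            return (short) (pkPos + 1 - startOffset - (tenantColSkipped ? 1 : 0));
        }

        public static void main(String[] args) {
            // Salted table, tenant connection: pkPos 2 is the first visible PK column.
            System.out.println(keySeq(2, true, true));    // 1
            // Unsalted table, global connection: pkPos 0 maps to KEY_SEQ 1.
            System.out.println(keySeq(0, false, false));  // 1
        }
    }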



[22/51] [abbrv] phoenix git commit: PHOENIX-4666 Persistent subquery cache for hash joins

2018-10-17 Thread pboado
PHOENIX-4666 Persistent subquery cache for hash joins

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cb697933
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cb697933
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cb697933

Branch: refs/heads/4.x-cdh5.15
Commit: cb6979333155b3d6b9fd0684304f52e9b33f42f4
Parents: 912215c
Author: Marcell Ortutay 
Authored: Thu Mar 29 20:59:03 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../end2end/join/HashJoinPersistentCacheIT.java | 167 +++
 .../org/apache/phoenix/cache/GlobalCache.java   |  22 +-
 .../apache/phoenix/cache/ServerCacheClient.java |  59 --
 .../org/apache/phoenix/cache/TenantCache.java   |   2 +-
 .../apache/phoenix/cache/TenantCacheImpl.java   | 209 ---
 .../apache/phoenix/compile/QueryCompiler.java   |   9 +-
 .../phoenix/compile/StatementContext.java   |  21 +-
 .../coprocessor/HashJoinRegionScanner.java  |   4 +-
 .../coprocessor/ServerCachingEndpointImpl.java  |   2 +-
 .../generated/ServerCachingProtos.java  | 117 +--
 .../apache/phoenix/execute/HashJoinPlan.java| 104 +++--
 .../phoenix/iterate/BaseResultIterators.java|   8 +-
 .../phoenix/iterate/TableResultIterator.java|   6 +-
 .../apache/phoenix/join/HashCacheClient.java|  24 ++-
 .../apache/phoenix/join/HashCacheFactory.java   |  11 +
 .../java/org/apache/phoenix/parse/HintNode.java |   4 +
 .../org/apache/phoenix/query/QueryServices.java |   1 +
 .../phoenix/query/QueryServicesOptions.java |   1 +
 .../apache/phoenix/cache/TenantCacheTest.java   | 112 --
 .../src/main/ServerCachingService.proto |   1 +
 phoenix-protocol/src/main/build-proto.sh|   6 +
 21 files changed, 773 insertions(+), 117 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cb697933/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinPersistentCacheIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinPersistentCacheIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinPersistentCacheIT.java
new file mode 100644
index 000..2f072b8
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/join/HashJoinPersistentCacheIT.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.join;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.util.Properties;
+
+import org.apache.phoenix.end2end.join.HashJoinCacheIT.InvalidateHashCache;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Test;
+
+public class HashJoinPersistentCacheIT extends BaseJoinIT {
+
+@Override
+protected String getTableName(Connection conn, String virtualName) throws 
Exception {
+String realName = super.getTableName(conn, virtualName);
+TestUtil.addCoprocessor(conn, 
SchemaUtil.normalizeFullTableName(realName),
+InvalidateHashCache.class);
+return realName;
+}
+
+@Test
+public void testPersistentCache() throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+
+createTestTable(getUrl(),
+"CREATE TABLE IF NOT EXISTS states (state CHAR(2) " +
+"NOT NULL, name VARCHAR NOT NULL CONSTRAINT my_pk PRIMARY KEY 
(state, name))");
+createTestTable(getUrl(),
+"CREATE TABLE IF NOT EXISTS cities (state CHAR(2) " +
+ "NOT NULL, city VARCHAR NOT NULL, population BIGINT " +
+  
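
For orientation, a hedged usage sketch: per the HintNode change in this
commit, the feature is requested with a query hint (shown here as
USE_PERSISTENT_CACHE; the JDBC URL and the states/cities schema mirror the
IT above):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class PersistentCacheHintSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                // With the hint, the server-side hash cache built for the join's
                // RHS is kept and reused by later queries instead of being
                // rebuilt on every execution.
                ResultSet rs = stmt.executeQuery(
                        "SELECT /*+ USE_PERSISTENT_CACHE */ c.city, s.name "
                        + "FROM cities c JOIN states s ON c.state = s.state");
                while (rs.next()) {
                    System.out.println(rs.getString(1) + ", " + rs.getString(2));
                }
            }
        }
    }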

[03/51] [abbrv] phoenix git commit: PHOENIX-4882 The client re-resolves the table for every projected non-indexed column when there's a local index.

2018-10-17 Thread pboado
PHOENIX-4882 The client re-resolves the table for every projected non-indexed 
column when there's a local index.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bb297e78
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bb297e78
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bb297e78

Branch: refs/heads/4.x-cdh5.15
Commit: bb297e7815bedaa2253299f60d189f13b220ccef
Parents: dedc04c
Author: Lars Hofhansl 
Authored: Thu Sep 6 19:23:22 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../src/main/java/org/apache/phoenix/compile/FromCompiler.java  | 5 +
 .../java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bb297e78/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 6eb5952..efc66a9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -249,6 +249,11 @@ public class FromCompiler {
 return visitor;
 }
 
+public static ColumnResolver getResolver(NamedTableNode tableNode, 
PhoenixConnection connection, boolean updateCacheImmediately) throws 
SQLException {
+SingleTableColumnResolver visitor = new 
SingleTableColumnResolver(connection, tableNode, updateCacheImmediately);
+return visitor;
+}
+
 public static ColumnResolver getResolver(NamedTableNode tableNode, 
PhoenixConnection connection, Map<String, UDFParseNode> udfParseNodes) throws 
SQLException {
 SingleTableColumnResolver visitor =
 new SingleTableColumnResolver(connection, tableNode, true, 0, 
udfParseNodes);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bb297e78/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
index 270c66d..0061331 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/LocalIndexDataColumnRef.java
@@ -37,7 +37,7 @@ public class LocalIndexDataColumnRef extends ColumnRef {
 super(FromCompiler.getResolver(
 FACTORY.namedTable(null, 
TableName.create(context.getCurrentTable().getTable()
 .getSchemaName().getString(), 
context.getCurrentTable().getTable()
-.getParentTableName().getString())), 
context.getConnection()).resolveTable(
+.getParentTableName().getString())), 
context.getConnection(), false).resolveTable(
 context.getCurrentTable().getTable().getSchemaName().getString(),
 
context.getCurrentTable().getTable().getParentTableName().getString()), 
IndexUtil
 .getDataColumnFamilyName(indexColumnName), IndexUtil



[24/51] [abbrv] phoenix git commit: PHOENIX-4874 psql doesn't support date/time with values smaller than milliseconds(Rajeshbabu)

2018-10-17 Thread pboado
PHOENIX-4874 psql doesn't support date/time with values smaller than 
milliseconds(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9e47f1e8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9e47f1e8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9e47f1e8

Branch: refs/heads/4.x-cdh5.15
Commit: 9e47f1e80967b88d402f7588cf50dd313addb22c
Parents: b881226
Author: Rajeshbabu Chintaguntla 
Authored: Mon Sep 24 00:39:31 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../phoenix/util/csv/CsvUpsertExecutor.java | 20 +---
 .../phoenix/util/json/JsonUpsertExecutor.java   |  3 ++
 .../util/AbstractUpsertExecutorTest.java| 51 +++-
 3 files changed, 54 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9e47f1e8/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
index 4f98ada..0b5881f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/util/csv/CsvUpsertExecutor.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.util.csv;
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
+import java.sql.Timestamp;
 import java.sql.Types;
 import java.util.Base64;
 import java.util.List;
@@ -30,6 +31,7 @@ import javax.annotation.Nullable;
 import org.apache.commons.csv.CSVRecord;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.expression.function.EncodeFormat;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.IllegalDataException;
@@ -41,6 +43,7 @@ import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.util.ColumnInfo;
 import org.apache.phoenix.util.DateUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.UpsertExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -125,9 +128,9 @@ public class CsvUpsertExecutor extends 
UpsertExecutor<CSVRecord, String> {
 private final String binaryEncoding;
 
 SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) {
-Properties props;
+ReadOnlyProps props;
 try {
-props = conn.getClientInfo();
+props = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
 } catch (SQLException e) {
 throw new RuntimeException(e);
 }
@@ -139,23 +142,23 @@ public class CsvUpsertExecutor extends 
UpsertExecutor<CSVRecord, String> {
 String dateFormat;
 int dateSqlType = dataType.getResultSetSqlType();
 if (dateSqlType == Types.DATE) {
-dateFormat = 
props.getProperty(QueryServices.DATE_FORMAT_ATTRIB,
+dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB,
 DateUtil.DEFAULT_DATE_FORMAT);
 } else if (dateSqlType == Types.TIME) {
-dateFormat = 
props.getProperty(QueryServices.TIME_FORMAT_ATTRIB,
+dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB,
 DateUtil.DEFAULT_TIME_FORMAT);
 } else {
-dateFormat = 
props.getProperty(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
+dateFormat = 
props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
 DateUtil.DEFAULT_TIMESTAMP_FORMAT);

 }
-String timeZoneId = 
props.getProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
+String timeZoneId = 
props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
 QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
 this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, 
dataType, timeZoneId);
 } else {
 this.dateTimeParser = null;
 }
 this.codec = codec;
-this.binaryEncoding = 
props.getProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,
+this.binaryEncoding = 
props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,
 
QueryServicesOptions.DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING);
 }
 
@@ -165,6 +168,9 @@ public class 
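
The net effect is that psql's CSV path now honors cluster-level format
settings instead of per-connection client info; a compact sketch of the
lookup the patch switches to (identifiers as in the diff; the JDBC URL is a
placeholder):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import org.apache.phoenix.jdbc.PhoenixConnection;
    import org.apache.phoenix.query.QueryServices;
    import org.apache.phoenix.util.DateUtil;
    import org.apache.phoenix.util.ReadOnlyProps;

    public class TimestampFormatLookup {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // Query-services properties include the cluster configuration,
                // unlike conn.getClientInfo().
                ReadOnlyProps props =
                        conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
                String tsFormat = props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
                        DateUtil.DEFAULT_TIMESTAMP_FORMAT);
                System.out.println("timestamp format: " + tsFormat);
            }
        }
    }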

[44/51] [abbrv] phoenix git commit: Revert "PHOENIX-4825 Replace usage of HBase Base64 implementation with java.util.Base64"

2018-10-17 Thread pboado
Revert "PHOENIX-4825 Replace usage of HBase Base64 implementation with 
java.util.Base64"

This reverts commit 22934e5af7af79580bf54feeb7667eccafaafc71 in order to 
support JDK 1.7 for 4.x releases.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/708a7885
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/708a7885
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/708a7885

Branch: refs/heads/4.x-cdh5.15
Commit: 708a7885b3878ae1f0f44248b05a6016b8a0abbe
Parents: 3cac921
Author: Ankit Singhal 
Authored: Sat Oct 6 00:53:31 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java |  7 ++---
 .../phoenix/mapreduce/CsvBulkImportUtil.java|  8 ++
 .../util/PhoenixConfigurationUtil.java  |  7 ++---
 .../apache/phoenix/schema/types/PVarbinary.java |  4 +--
 .../phoenix/util/csv/CsvUpsertExecutor.java |  4 +--
 .../phoenix/util/json/JsonUpsertExecutor.java   |  4 +--
 .../util/AbstractUpsertExecutorTest.java| 12 
 .../util/TenantIdByteConversionTest.java| 30 
 8 files changed, 26 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/708a7885/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 528fe7f..04272fa 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -31,13 +31,12 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
-import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
@@ -279,7 +278,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 values[i] = rs.getObject(i + 1);
 }
 conn = getTenantSpecificConnection(tenantId);
-
pkIds.add(Bytes.toString(Base64.getEncoder().encode(PhoenixRuntime.encodeColumnValues(conn,
 tableOrViewName.toUpperCase(), values, columns))));
+
pkIds.add(Base64.encodeBytes(PhoenixRuntime.encodeColumnValues(conn, 
tableOrViewName.toUpperCase(), values, columns)));
 }
 return pkIds.toArray(new String[pkIds.size()]);
 }
@@ -297,7 +296,7 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 PreparedStatement stmt = conn.prepareStatement(query);
 int bindCounter = 1;
 for (int i = 0; i < cursorIds.length; i++) {
-Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.getDecoder().decode(cursorIds[i]), columns);
+Object[] pkParts = PhoenixRuntime.decodeColumnValues(conn, 
tableName.toUpperCase(), Base64.decode(cursorIds[i]), columns);
 for (int j = 0; j < pkParts.length; j++) {
 stmt.setObject(bindCounter++, pkParts[j]);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/708a7885/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
index bf5a538..ff9ff72 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
@@ -17,11 +17,9 @@
  */
 package org.apache.phoenix.mapreduce;
 
-import java.util.Base64;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Base64;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
@@ -70,7 +68,7 @@ public class CsvBulkImportUtil {
 
 @VisibleForTesting
 static void setChar(Configuration conf, String confKey, char charValue) {
-conf.set(confKey, 
Bytes.toString(Base64.getEncoder().encode(Character.toString(charValue).getBytes())));
+conf.set(confKey, 
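
Both encoders appear verbatim in the diff; a side-by-side sketch of why the
revert matters (java.util.Base64 exists only on JDK 8+, while the HBase
helper also runs on JDK 1.7, which the 4.x line still supports):

    import java.nio.charset.StandardCharsets;

    public class Base64Compat {
        public static void main(String[] args) {
            byte[] payload = "|".getBytes(StandardCharsets.UTF_8);

            // JDK 8+ only -- what PHOENIX-4825 had introduced:
            String jdk8 = new String(java.util.Base64.getEncoder().encode(payload),
                    StandardCharsets.UTF_8);

            // JDK 1.7-compatible HBase utility -- what this revert restores
            // (needs hbase-common on the classpath):
            // String jdk7 = org.apache.hadoop.hbase.util.Base64.encodeBytes(payload);

            System.out.println(jdk8); // fA==
        }
    }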

[43/51] [abbrv] phoenix git commit: PHOENIX-4967 Reverse scan along LOCAL index does not always return all data.

2018-10-17 Thread pboado
PHOENIX-4967 Reverse scan along LOCAL index does not always return all data.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1c380865
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1c380865
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1c380865

Branch: refs/heads/4.x-cdh5.15
Commit: 1c3808654abbeb3e2f6042064a38439b6d20589c
Parents: a694638
Author: Lars Hofhansl 
Authored: Sat Oct 13 22:45:19 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../phoenix/end2end/index/LocalIndexIT.java | 55 +++-
 .../phoenix/iterate/BaseResultIterators.java|  3 +-
 2 files changed, 56 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c380865/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 5a59c81..d70a505 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -298,11 +298,15 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 
 rs = conn1.createStatement().executeQuery(query);
 String v = "";
+int i = 0;
 while(rs.next()) {
 String next = rs.getString("v1");
 assertTrue(v.compareTo(next) <= 0);
 v = next;
+i++;
 }
+// see PHOENIX-4967
+assertEquals(4, i);
 rs.close();
 
 query = "SELECT * FROM " + tableName +" ORDER BY V1 DESC NULLS 
LAST";
@@ -316,16 +320,65 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 
 rs = conn1.createStatement().executeQuery(query);
 v = "zz";
+i = 0;
 while(rs.next()) {
 String next = rs.getString("v1");
 assertTrue(v.compareTo(next) >= 0);
 v = next;
+i++;
 }
+// see PHOENIX-4967
+assertEquals(4, i);
 rs.close();
 
 }
 }
-
+
+@Test
+public void testLocalIndexReverseScanShouldReturnAllRows() throws 
Exception {
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
+TableName physicalTableName = 
SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
+String indexPhysicalTableName = physicalTableName.getNameAsString();
+
+createBaseTable(tableName, null, "('e','i','o')");
+try (Connection conn1 = getConnection()) {
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('b',1,2,4,'z')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('f',1,2,3,'a')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('j',2,4,2,'b')");
+conn1.createStatement().execute("UPSERT INTO " + tableName + " 
values('q',3,1,1,'c')");
+conn1.commit();
+conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName 
+ " ON " + tableName + "(v1)");
+
+String query = "SELECT V1 FROM " + tableName +" ORDER BY V1 DESC 
NULLS LAST";
+ResultSet rs = conn1.createStatement().executeQuery("EXPLAIN "+ 
query);
+
+HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
TestUtil.TEST_PROPERTIES).getAdmin();
+int numRegions = admin.getTableRegions(physicalTableName).size();
+
+assertEquals(
+"CLIENT PARALLEL " + numRegions + "-WAY REVERSE RANGE SCAN 
OVER "
++ indexPhysicalTableName + " [1]\n"
++ "SERVER FILTER BY FIRST KEY ONLY\n"
++ "CLIENT MERGE SORT",
+QueryUtil.getExplainPlan(rs));
+
+rs = conn1.createStatement().executeQuery(query);
+String v = "zz";
+int i = 0;
+while(rs.next()) {
+String next = rs.getString("v1");
+assertTrue(v.compareTo(next) >= 0);
+v = next;
+i++;
+}
+// see PHOENIX-4967
+assertEquals(4, i);
+rs.close();
+
+}
+}
+
 @Test
 public void testLocalIndexScanJoinColumnsFromDataTable() throws Exception {
 String tableName = schemaName + "." + generateUniqueName();


[08/51] [abbrv] phoenix git commit: PHOENIX-4884 Update INSTR to handle literals and non-literals in either function argument

2018-10-17 Thread pboado
PHOENIX-4884 Update INSTR to handle literals and non-literals in either 
function argument

INSTR previously only handled arguments of the form non-literal and literal, 
but the documentation
doesn't clearly state this. We can support all variants though.
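
For illustration, the four argument shapes the patch enables, as issued over JDBC
(a sketch; the JDBC URL is a placeholder, and the table and its name/substr
columns mirror the ITs below):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class InstrShapesSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.executeQuery("SELECT INSTR(name, 'sdf') FROM T");    // column, literal (the only case before)
                stmt.executeQuery("SELECT INSTR('asdf', 'sdf') FROM T");  // literal, literal
                stmt.executeQuery("SELECT INSTR('asdf', substr) FROM T"); // literal, column
                stmt.executeQuery("SELECT INSTR(name, substr) FROM T");   // column, column
            }
        }
    }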


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e83c6147
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e83c6147
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e83c6147

Branch: refs/heads/4.x-cdh5.15
Commit: e83c6147e5696b34d76de7ae16ab2233bda864ae
Parents: cb69793
Author: Josh Elser 
Authored: Fri Aug 31 15:59:47 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../apache/phoenix/end2end/InstrFunctionIT.java | 35 +
 .../expression/function/InstrFunction.java  | 78 +---
 .../expression/function/InstrFunctionTest.java  | 44 +++
 3 files changed, 114 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e83c6147/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
index 270b1ec..bc86980 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InstrFunctionIT.java
@@ -131,4 +131,39 @@ public class InstrFunctionIT extends 
ParallelStatsDisabledIT {
 testInstrFilter(conn, queryToExecute,"abcdefghijkl");
 }
 
+@Test
+public void testNonLiteralExpression() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+String tableName = generateUniqueName();
+initTable(conn, tableName, "ASC", "asdf", "sdf");
+// Should be able to use INSTR with a non-literal expression as the 
2nd argument
+String query = "SELECT INSTR(name, substr) FROM " + tableName;
+testInstr(conn, query, 2);
+}
+
+@Test
+public void testNonLiteralSourceExpression() throws Exception {
+Connection conn = DriverManager.getConnection(getUrl());
+String tableName = generateUniqueName();
+initTable(conn, tableName, "ASC", "asdf", "sdf");
+// Using the function inside the SELECT will test client-side.
+String query = "SELECT INSTR('asdf', 'sdf') FROM " + tableName;
+testInstr(conn, query, 2);
+query = "SELECT INSTR('asdf', substr) FROM " + tableName;
+testInstr(conn, query, 2);
+query = "SELECT INSTR('qwerty', 'sdf') FROM " + tableName;
+testInstr(conn, query, 0);
+query = "SELECT INSTR('qwerty', substr) FROM " + tableName;
+testInstr(conn, query, 0);
+// Test the built-in function in a where clause to make sure
+// it works server-side (and not just client-side).
+query = "SELECT name FROM " + tableName + " WHERE INSTR(name, substr) 
= 2";
+testInstrFilter(conn, query, "asdf");
+query = "SELECT name FROM " + tableName + " WHERE INSTR(name, 'sdf') = 
2";
+testInstrFilter(conn, query, "asdf");
+query = "SELECT name FROM " + tableName + " WHERE INSTR('asdf', 
substr) = 2";
+testInstrFilter(conn, query, "asdf");
+query = "SELECT name FROM " + tableName + " WHERE INSTR('asdf', 'sdf') 
= 2";
+testInstrFilter(conn, query, "asdf");
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e83c6147/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
index 7a002f8..e6b4c16 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InstrFunction.java
@@ -30,7 +30,6 @@ import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PVarchar;
-import org.apache.phoenix.util.ByteUtil;
 
 @BuiltInFunction(name=InstrFunction.NAME, args={
 @Argument(allowedTypes={ PVarchar.class }),
@@ -38,8 +37,9 @@ import org.apache.phoenix.util.ByteUtil;
 public class InstrFunction extends ScalarFunction{
 
 public static final String NAME = "INSTR";
-
-private String strToSearch = null;
+
+private String literalSourceStr = null;
+private 

[25/51] [abbrv] phoenix git commit: PHOENIX-4907 - IndexScrutinyTool should use empty catalog instead of null

2018-10-17 Thread pboado
PHOENIX-4907 - IndexScrutinyTool should use empty catalog instead of null
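
The one-line change leans on the java.sql.DatabaseMetaData contract: a null
catalog means catalogs are ignored when matching, while the empty string matches
only tables without a catalog. A sketch of the lookup as the tool now performs it
(schemaName, tableName and indexTable are placeholders):

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    class IndexLookupSketch {
        static String findIndex(Connection conn, String schemaName, String tableName,
                String indexTable) throws SQLException {
            DatabaseMetaData md = conn.getMetaData();
            // "" = empty catalog: match only tables that have no catalog.
            try (ResultSet rs = md.getIndexInfo("", schemaName, tableName, false, false)) {
                while (rs.next()) {
                    String indexName = rs.getString(6); // column 6 is INDEX_NAME per the JDBC spec
                    if (indexTable.equalsIgnoreCase(indexName)) {
                        return indexName;
                    }
                }
            }
            return null;
        }
    }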


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8208c0ab
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8208c0ab
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8208c0ab

Branch: refs/heads/4.x-cdh5.15
Commit: 8208c0ab20c02ecfcca1b532e84b688c3f57d3eb
Parents: 1c65cdc
Author: Geoffrey 
Authored: Tue Sep 18 00:09:44 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8208c0ab/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
index f3ff39e..d9a14bf 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
@@ -499,7 +499,7 @@ public class IndexScrutinyTool extends Configured 
implements Tool {
 
 ResultSet rs = null;
 try {
-rs = dbMetaData.getIndexInfo(null, schemaName, tableName, false, false);
+rs = dbMetaData.getIndexInfo("", schemaName, tableName, false, false);
 while (rs.next()) {
 final String indexName = rs.getString(6);
 if (indexTable.equalsIgnoreCase(indexName)) {



[07/51] [abbrv] phoenix git commit: PHOENIX-4854 Make LoggingPhoenixResultSet idempotent when logging metrics

2018-10-17 Thread pboado
PHOENIX-4854 Make LoggingPhoenixResultSet idempotent when logging metrics
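
The IT changes below count how often each logging callback fires; the idempotence
itself is typically a one-shot guard in the ResultSet wrapper. A hedged sketch of
the pattern (field and method names here are illustrative, not the actual Phoenix
ones):

    class OneShotMetricsLoggerSketch {
        private boolean metricsLogged = false; // flips once, so repeated close() logs once

        void close() {
            if (!metricsLogged) {
                logMetrics();         // placeholder for the real metric sinks
                metricsLogged = true; // subsequent close() calls become no-ops
            }
        }

        private void logMetrics() { /* forward to PhoenixMetricsLog */ }
    }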


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/08a3cf0d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/08a3cf0d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/08a3cf0d

Branch: refs/heads/4.x-cdh5.15
Commit: 08a3cf0d324cd8d5f1f8e8eadd13e1a30f5d96ff
Parents: 9d07afa
Author: Karan Mehta 
Authored: Mon Aug 20 18:12:37 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../monitoring/PhoenixLoggingMetricsIT.java | 49 +++-
 .../phoenix/jdbc/LoggingPhoenixResultSet.java   | 15 --
 2 files changed, 38 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/08a3cf0d/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 97b2c5d..7e56902 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -26,6 +26,7 @@ import org.junit.Test;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Map;
 
@@ -44,6 +45,8 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 private String tableName2;
 private LoggingPhoenixConnection loggedConn;
 private String loggedSql;
+private int logOverAllReadRequestMetricsFuncCallCount;
+private int logRequestReadMetricsFuncCallCount;
 
 @Before
 public void beforeTest() throws Exception {
@@ -69,17 +72,7 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 public void testPhoenixMetricsLoggedOnCommit() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
-Statement stmt = loggedConn.createStatement();
-ResultSet rs = stmt.executeQuery(query);
-while (rs.next()) {
-}
-rs.close();
-assertTrue("Read metrics for not found for " + tableName1,
-requestReadMetricsMap.get(tableName1).size() > 0);
-assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
-
-assertTrue("Overall read metrics for not found ", 
overAllQueryMetricsMap.size() > 0);
-assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+verifyQueryLevelMetricsLogging(query);
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -117,17 +110,7 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 public void testPhoenixMetricsLoggedOnClose() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
-Statement stmt = loggedConn.createStatement();
-ResultSet rs = stmt.executeQuery(query);
-while (rs.next()) {
-}
-rs.close();
-assertTrue("Read metrics for not found for " + tableName1,
-requestReadMetricsMap.get(tableName1).size() > 0);
-assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
-
-assertTrue("Overall read metrics for not found ", 
overAllQueryMetricsMap.size() > 0);
-assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+verifyQueryLevelMetricsLogging(query);
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -151,6 +134,26 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 mutationReadMetricsMap.size() == 0);
 }
 
+private void verifyQueryLevelMetricsLogging(String query) throws 
SQLException {
+Statement stmt = loggedConn.createStatement();
+ResultSet rs = stmt.executeQuery(query);
+while (rs.next()) {
+}
+rs.close();
+assertTrue("Read metrics for not found for " + tableName1,
+requestReadMetricsMap.get(tableName1).size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+assertTrue(logOverAllReadRequestMetricsFuncCallCount == 1);
+assertTrue(logRequestReadMetricsFuncCallCount == 1);

[10/51] [abbrv] phoenix git commit: PHOENIX-4946 Switch from HC's annotations (since removed) to JCIP annotations

2018-10-17 Thread pboado
PHOENIX-4946 Switch from HC's annotations (since removed) to JCIP annotations

Avoids an old httpclient artifact conflicting with Hadoop3 implementation.

Signed-off-by: Sergey Soldatov 
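
net.jcip.annotations carries the same concurrency annotations that the removed
org.apache.http.annotation package mirrored, so the migration is a one-for-one
import swap. A small sketch of their use (the classes here are made up):

    import net.jcip.annotations.GuardedBy;
    import net.jcip.annotations.Immutable;
    import net.jcip.annotations.ThreadSafe;

    @ThreadSafe
    class CounterSketch {
        private final Object lock = new Object();
        @GuardedBy("lock") // documents which monitor protects the field
        private long count;

        long increment() {
            synchronized (lock) {
                return ++count;
            }
        }
    }

    @Immutable
    final class PointSketch {
        final int x, y;
        PointSketch(int x, int y) { this.x = x; this.y = y; }
    }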


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/02995aa3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/02995aa3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/02995aa3

Branch: refs/heads/4.x-cdh5.15
Commit: 02995aa3b439f6f137f6f11ce48cb52a2941785f
Parents: a6c1aa4
Author: Josh Elser 
Authored: Wed Oct 3 22:43:05 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 phoenix-core/pom.xml   | 6 --
 .../src/main/java/org/apache/phoenix/cache/HashCache.java  | 3 ++-
 .../main/java/org/apache/phoenix/compile/GroupByCompiler.java  | 3 ++-
 .../java/org/apache/phoenix/memory/ChildMemoryManager.java | 5 +++--
 .../java/org/apache/phoenix/memory/GlobalMemoryManager.java| 4 +++-
 .../main/java/org/apache/phoenix/parse/FunctionParseNode.java  | 3 ++-
 .../src/main/java/org/apache/phoenix/query/QueryServices.java  | 3 ++-
 .../src/main/java/org/apache/phoenix/schema/ColumnRef.java | 3 ++-
 .../main/java/org/apache/phoenix/schema/KeyValueSchema.java| 3 ++-
 .../src/main/java/org/apache/phoenix/schema/PNameImpl.java | 5 +++--
 10 files changed, 21 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/02995aa3/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index c20c89c..57fc81b 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -283,12 +283,6 @@
   protobuf-java
   ${protobuf-java.version}
 
-
-
-  org.apache.httpcomponents
-  httpclient
-  4.0.1
-
 
   log4j
   log4j

http://git-wip-us.apache.org/repos/asf/phoenix/blob/02995aa3/phoenix-core/src/main/java/org/apache/phoenix/cache/HashCache.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/HashCache.java 
b/phoenix-core/src/main/java/org/apache/phoenix/cache/HashCache.java
index 764fd17..80e37ce 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/HashCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/HashCache.java
@@ -21,7 +21,8 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.http.annotation.Immutable;
+import net.jcip.annotations.Immutable;
+
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.schema.tuple.Tuple;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/02995aa3/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
index 0a9e1bc..4777c29 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/GroupByCompiler.java
@@ -23,8 +23,9 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 
+import net.jcip.annotations.Immutable;
+
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.http.annotation.Immutable;
 import org.apache.phoenix.compile.OrderPreservingTracker.Ordering;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.exception.SQLExceptionCode;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/02995aa3/phoenix-core/src/main/java/org/apache/phoenix/memory/ChildMemoryManager.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/memory/ChildMemoryManager.java 
b/phoenix-core/src/main/java/org/apache/phoenix/memory/ChildMemoryManager.java
index da009fb..f5ad5dd 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/memory/ChildMemoryManager.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/memory/ChildMemoryManager.java
@@ -17,8 +17,9 @@
  */
 package org.apache.phoenix.memory;
 
-import org.apache.http.annotation.GuardedBy;
-import org.apache.http.annotation.ThreadSafe;
+import net.jcip.annotations.GuardedBy;
+import net.jcip.annotations.ThreadSafe;
+
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/02995aa3/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java

[28/51] [abbrv] phoenix git commit: PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/phoenixdb/types.py
--
diff --git a/python/phoenixdb/types.py b/python/phoenixdb/types.py
deleted file mode 100644
index f41355a..000
--- a/python/phoenixdb/types.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import time
-import datetime
-from decimal import Decimal
-from phoenixdb.avatica.proto import common_pb2
-
-__all__ = [
-'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 
'TimestampFromTicks',
-'Binary', 'STRING', 'BINARY', 'NUMBER', 'DATETIME', 'ROWID', 'BOOLEAN',
-'JAVA_CLASSES', 'JAVA_CLASSES_MAP', 'TypeHelper',
-]
-
-
-def Date(year, month, day):
-"""Constructs an object holding a date value."""
-return datetime.date(year, month, day)
-
-
-def Time(hour, minute, second):
-"""Constructs an object holding a time value."""
-return datetime.time(hour, minute, second)
-
-
-def Timestamp(year, month, day, hour, minute, second):
-"""Constructs an object holding a datetime/timestamp value."""
-return datetime.datetime(year, month, day, hour, minute, second)
-
-
-def DateFromTicks(ticks):
-"""Constructs an object holding a date value from the given UNIX 
timestamp."""
-return Date(*time.localtime(ticks)[:3])
-
-
-def TimeFromTicks(ticks):
-"""Constructs an object holding a time value from the given UNIX 
timestamp."""
-return Time(*time.localtime(ticks)[3:6])
-
-
-def TimestampFromTicks(ticks):
-"""Constructs an object holding a datetime/timestamp value from the given 
UNIX timestamp."""
-return Timestamp(*time.localtime(ticks)[:6])
-
-
-def Binary(value):
-"""Constructs an object capable of holding a binary (long) string value."""
-return bytes(value)
-
-
-def time_from_java_sql_time(n):
-dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=n)
-return dt.time()
-
-
-def time_to_java_sql_time(t):
-return ((t.hour * 60 + t.minute) * 60 + t.second) * 1000 + t.microsecond 
// 1000
-
-
-def date_from_java_sql_date(n):
-return datetime.date(1970, 1, 1) + datetime.timedelta(days=n)
-
-
-def date_to_java_sql_date(d):
-if isinstance(d, datetime.datetime):
-d = d.date()
-td = d - datetime.date(1970, 1, 1)
-return td.days
-
-
-def datetime_from_java_sql_timestamp(n):
-return datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=n)
-
-
-def datetime_to_java_sql_timestamp(d):
-td = d - datetime.datetime(1970, 1, 1)
-return td.microseconds // 1000 + (td.seconds + td.days * 24 * 3600) * 1000
-
-
-class ColumnType(object):
-
-def __init__(self, eq_types):
-self.eq_types = tuple(eq_types)
-self.eq_types_set = set(eq_types)
-
-def __eq__(self, other):
-return other in self.eq_types_set
-
-def __cmp__(self, other):
-if other in self.eq_types_set:
-return 0
-if other < self.eq_types:
-return 1
-else:
-return -1
-
-
-STRING = ColumnType(['VARCHAR', 'CHAR'])
-"""Type object that can be used to describe string-based columns."""
-
-BINARY = ColumnType(['BINARY', 'VARBINARY'])
-"""Type object that can be used to describe (long) binary columns."""
-
-NUMBER = ColumnType([
-'INTEGER', 'UNSIGNED_INT', 'BIGINT', 'UNSIGNED_LONG', 'TINYINT', 
'UNSIGNED_TINYINT',
-'SMALLINT', 'UNSIGNED_SMALLINT', 'FLOAT', 'UNSIGNED_FLOAT', 'DOUBLE', 
'UNSIGNED_DOUBLE', 'DECIMAL'
-])
-"""Type object that can be used to describe numeric columns."""
-
-DATETIME = ColumnType(['TIME', 'DATE', 'TIMESTAMP', 'UNSIGNED_TIME', 
'UNSIGNED_DATE', 'UNSIGNED_TIMESTAMP'])
-"""Type object that can be used to describe date/time columns."""
-
-ROWID = ColumnType([])
-"""Only implemented for DB API 2.0 compatibility, not used."""
-
-BOOLEAN = ColumnType(['BOOLEAN'])
-"""Type object that can be used to describe boolean columns. This is a 
phoenixdb-specific extension."""
-
-
-# XXX ARRAY
-
-if sys.version_info[0] < 3:
-_long = long  # noqa: F821
-else:
-_long = int
-
-JAVA_CLASSES = {
-'bool_value': [
-('java.lang.Boolean', common_pb2.BOOLEAN, None, None),
-],
-

[04/51] [abbrv] phoenix git commit: PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException(Aman Poonia)

2018-10-17 Thread pboado
PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException(Aman 
Poonia)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2d2aff63
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2d2aff63
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2d2aff63

Branch: refs/heads/4.x-cdh5.15
Commit: 2d2aff6363c7296964c0ff5550f0e3a6a193b407
Parents: 08a3cf0
Author: Ankit Singhal 
Authored: Tue Aug 21 19:54:01 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../regionserver/IndexHalfStoreFileReader.java  |   6 +
 .../IndexHalfStoreFileReaderGenerator.java  | 138 ++-
 2 files changed, 18 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d2aff63/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index d1d12fb..8bd0d72 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -123,4 +123,10 @@ public class IndexHalfStoreFileReader extends 
StoreFile.Reader {
 public boolean isTop() {
 return top;
 }
+
+@Override
+public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread, boolean isCompaction, long readPt) {
+return new LocalIndexStoreFileScanner(this, getScanner(cacheBlocks, 
pread, isCompaction), true,
+getHFileReader().hasMVCCInfo(), readPt);
+}
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d2aff63/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index e41086b..ab65456 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -17,16 +17,11 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_START_ROW_SUFFIX;
-
 import java.io.IOException;
 import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.NavigableSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -71,7 +66,7 @@ import org.apache.phoenix.util.RepairUtil;
 import com.google.common.collect.Lists;
 
 public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
-
+
 private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = 
"local.index.automatic.repair";
 public static final Log LOG = 
LogFactory.getLog(IndexHalfStoreFileReaderGenerator.class);
 
@@ -153,7 +148,9 @@ public class IndexHalfStoreFileReaderGenerator extends 
BaseRegionObserver {
 try {
 conn = 
QueryUtil.getConnectionOnServer(ctx.getEnvironment().getConfiguration()).unwrap(
 PhoenixConnection.class);
-PTable dataTable = IndexUtil.getPDataTable(conn, 
ctx.getEnvironment().getRegion().getTableDesc());
+PTable dataTable =
+IndexUtil.getPDataTable(conn, 
ctx.getEnvironment().getRegion()
+.getTableDesc());
 List<PTable> indexes = dataTable.getIndexes();
 Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers =
 new HashMap<ImmutableBytesWritable, IndexMaintainer>();
@@ -187,19 +184,12 @@ public class IndexHalfStoreFileReaderGenerator extends 
BaseRegionObserver {
 return reader;
 }
 
-@SuppressWarnings("deprecation")
 @Override
-public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
-Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
-long earliestPutTs, InternalScanner s, CompactionRequest request) throws IOException {
+public InternalScanner preCompact(
+ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+InternalScanner s, ScanType scanType,
+CompactionRequest request) throws IOException {
 if 

[50/51] [abbrv] phoenix git commit: PHOENIX-3955: Ensure KEEP_DELETED_CELLS, REPLICATION_SCOPE, and TTL properties stay in sync between the physical data table and index tables

2018-10-17 Thread pboado
PHOENIX-3955: Ensure KEEP_DELETED_CELLS, REPLICATION_SCOPE, and TTL properties 
stay in sync between the physical data table and index tables
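
In DDL terms, the intent is that the synced properties follow the data table. A
hedged sketch over JDBC (the JDBC URL and the table/index names are placeholders;
the propagation described in the comment is the behaviour this patch adds):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class PropertySyncSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE t (k VARCHAR PRIMARY KEY, v VARCHAR) TTL=86400");
                stmt.execute("CREATE INDEX idx ON t (v)");
                // After PHOENIX-3955: TTL, KEEP_DELETED_CELLS and REPLICATION_SCOPE set
                // on the data table are pushed to idx's HBase descriptor too, and setting
                // them directly on the index is expected to be rejected.
                stmt.execute("ALTER TABLE t SET TTL=3600");
            }
        }
    }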


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eb13ffd8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eb13ffd8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eb13ffd8

Branch: refs/heads/4.x-cdh5.15
Commit: eb13ffd863e1a599fff07990b1e1a76badd6639f
Parents: 8702645
Author: Chinmay Kulkarni 
Authored: Mon Oct 8 05:12:55 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../apache/phoenix/end2end/AlterTableIT.java|   5 +-
 .../org/apache/phoenix/end2end/BaseQueryIT.java |  15 +-
 .../apache/phoenix/end2end/CreateTableIT.java   |  27 +-
 .../phoenix/end2end/PropertiesInSyncIT.java | 494 +++
 .../end2end/QueryDatabaseMetaDataIT.java|   7 +-
 .../apache/phoenix/end2end/SetPropertyIT.java   |  64 ++-
 .../org/apache/phoenix/end2end/SplitIT.java |  17 +
 .../org/apache/phoenix/tx/TransactionIT.java|   4 +-
 .../phoenix/exception/SQLExceptionCode.java |   6 +-
 .../query/ConnectionQueryServicesImpl.java  | 485 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 112 +++--
 .../apache/phoenix/schema/TableProperty.java|   4 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |  44 +-
 .../org/apache/phoenix/util/UpgradeUtil.java| 142 +-
 14 files changed, 1187 insertions(+), 239 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 2cac1a6..7af62b3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -925,7 +925,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 // set HColumnProperty property when adding a pk column and other 
key value columns should work
 ddl = "ALTER TABLE "
 + dataTableFullName
-+ " ADD k3 DECIMAL PRIMARY KEY, col2 bigint, CF.col3 
bigint IN_MEMORY = true, CF.IN_MEMORY=false, CF.REPLICATION_SCOPE = 1";
++ " ADD k3 DECIMAL PRIMARY KEY, col2 bigint, CF.col3 
bigint IN_MEMORY = true,"
++ " CF.IN_MEMORY=false, REPLICATION_SCOPE = 1";
 conn.createStatement().execute(ddl);
 // assert that k3 was added as new pk
 ResultSet rs = conn.getMetaData().getPrimaryKeys("", schemaName, 
dataTableName);
@@ -946,7 +947,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 assertEquals(2, columnFamilies.length);
 assertEquals("0", columnFamilies[0].getNameAsString());
 assertEquals(true, columnFamilies[0].isInMemory());
-assertEquals(0, columnFamilies[0].getScope());
+assertEquals(1, columnFamilies[0].getScope());
 assertEquals("CF", columnFamilies[1].getNameAsString());
 assertEquals(false, columnFamilies[1].isInMemory());
 assertEquals(1, columnFamilies[1].getScope());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
index ed3669c..e88dc57 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -53,20 +53,20 @@ public abstract class BaseQueryIT extends 
ParallelStatsDisabledIT {
 protected static final String[] GLOBAL_INDEX_DDLS =
 new String[] {
 "CREATE INDEX %s ON %s (a_integer DESC) INCLUDE (" + "
A_STRING, "
-+ "B_STRING, " + "A_DATE) %s",
++ "B_STRING, " + "A_DATE)",
 "CREATE INDEX %s ON %s (a_integer, a_string) INCLUDE (" + 
"B_STRING, "
-+ "A_DATE) %s",
++ "A_DATE)",
 "CREATE INDEX %s ON %s (a_integer) INCLUDE (" + "
A_STRING, "
-+ "B_STRING, " + "A_DATE) %s",
++ "B_STRING, " + "A_DATE)",
 

[02/51] [abbrv] phoenix git commit: PHOENIX-3547 Supporting more number of indices per table.

2018-10-17 Thread pboado
PHOENIX-3547 Supporting more number of indices per table.
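
The widening that shows up in the explain plans below: view index ids move from
16-bit to 64-bit, so the reserved first id changes from Short.MIN_VALUE to
Long.MIN_VALUE. A trivial sketch of the two constants:

    public class ViewIndexIdSketch {
        public static void main(String[] args) {
            short oldFirst = Short.MIN_VALUE; // -32768, as in the pre-patch plans
            long newFirst = Long.MIN_VALUE;   // -9223372036854775808, as in the patched plans
            System.out.println(oldFirst + " -> " + newFirst);
        }
    }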


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1fcf43c6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1fcf43c6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1fcf43c6

Branch: refs/heads/4.x-cdh5.15
Commit: 1fcf43c6e1734d6fda90e76e9b3599d857f44187
Parents: d218505
Author: Mahdi Salarkia 
Authored: Thu Aug 30 23:57:31 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../end2end/BaseTenantSpecificViewIndexIT.java  |   4 +-
 .../org/apache/phoenix/end2end/BaseViewIT.java  |   8 +-
 .../end2end/TenantSpecificViewIndexIT.java  |   2 +-
 .../org/apache/phoenix/end2end/UpgradeIT.java   |   2 +-
 .../java/org/apache/phoenix/end2end/ViewIT.java |  10 +-
 .../index/ChildViewsUseParentViewIndexIT.java   |   4 +-
 .../index/GlobalIndexOptimizationIT.java|   2 +-
 .../phoenix/end2end/index/IndexUsageIT.java |   4 +-
 .../phoenix/end2end/index/LocalIndexIT.java |   2 +-
 .../end2end/index/MutableIndexFailureIT.java|   4 +-
 .../phoenix/end2end/index/ViewIndexIT.java  |   6 +-
 .../IndexHalfStoreFileReaderGenerator.java  |   3 +-
 .../phoenix/compile/CreateTableCompiler.java|   5 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |   4 +-
 .../apache/phoenix/compile/FromCompiler.java|   2 +-
 .../apache/phoenix/compile/JoinCompiler.java|   2 +-
 .../compile/TupleProjectionCompiler.java|   4 +-
 .../apache/phoenix/compile/UnionCompiler.java   |   2 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |   4 +-
 .../apache/phoenix/compile/WhereOptimizer.java  |   2 +-
 .../coprocessor/MetaDataEndpointImpl.java   |  53 ++-
 .../phoenix/coprocessor/MetaDataProtocol.java   |  53 ++-
 .../coprocessor/generated/MetaDataProtos.java   | 471 +--
 .../coprocessor/generated/PTableProtos.java | 156 --
 .../generated/ServerCachingProtos.java  | 128 -
 .../apache/phoenix/index/IndexMaintainer.java   |  15 +-
 .../index/PhoenixIndexFailurePolicy.java|   4 +-
 .../apache/phoenix/iterate/ExplainTable.java|  13 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   2 +
 .../query/ConnectionQueryServicesImpl.java  |   8 +
 .../query/ConnectionlessQueryServicesImpl.java  |   2 +-
 .../apache/phoenix/query/QueryConstants.java|   5 +-
 .../apache/phoenix/schema/DelegateTable.java|   8 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  21 +-
 .../java/org/apache/phoenix/schema/PTable.java  |   3 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  50 +-
 .../org/apache/phoenix/util/MetaDataUtil.java   |   5 +
 .../TenantSpecificViewIndexCompileTest.java |   6 +-
 .../phoenix/execute/CorrelatePlanTest.java  |   2 +-
 .../execute/LiteralResultIteratorPlanTest.java  |   2 +-
 phoenix-protocol/src/main/MetaDataService.proto |   6 +-
 phoenix-protocol/src/main/PTable.proto  |   3 +-
 .../src/main/ServerCachingService.proto |   1 +
 43 files changed, 781 insertions(+), 312 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1fcf43c6/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
index 9bd689e..26e2860 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
@@ -148,9 +148,9 @@ public class BaseTenantSpecificViewIndexIT extends 
SplitSystemCatalogIT {
 + "CLIENT MERGE SORT", 
QueryUtil.getExplainPlan(rs));
 } else {
 String expected = saltBuckets == null ? 
-"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [-32768,'" + tenantId + "','" + valuePrefix + "v2-1']\n"
+"CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [-9223372036854775808,'" + tenantId + "','" + valuePrefix + "v2-1']\n"
 + "SERVER FILTER BY FIRST KEY ONLY" :
-"CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [0,-32768,'" + tenantId + "','" + valuePrefix + "v2-1'] - 
["+(saltBuckets.intValue()-1)+",-32768,'" + tenantId + "','" + valuePrefix + 
"v2-1']\n"
+"CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName 
+ " [0,-9223372036854775808,'" + tenantId + "','" + valuePrefix + "v2-1'] - 
["+(saltBuckets.intValue()-1)+",-9223372036854775808,'" + tenantId + "','" + 
valuePrefix + 

[46/51] [abbrv] phoenix git commit: PHOENIX-4975 Fix failing unit tests for Omid due to shadow cells and no local indexes

2018-10-17 Thread pboado
PHOENIX-4975 Fix failing unit tests for Omid due to shadow cells and no local 
indexes
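
The IndexToolIT hunk below stops generating local-index permutations for
transaction providers that do not support them; the guard reduces to a feature
probe. A sketch mirroring the calls in the diff:

    import org.apache.phoenix.transaction.PhoenixTransactionProvider.Feature;
    import org.apache.phoenix.transaction.TransactionFactory;

    final class LocalIndexGuardSketch {
        // Non-transactional tables (null provider) always allow local indexes.
        static boolean allowsLocalIndex(String transactionProvider) {
            return transactionProvider == null
                    || !TransactionFactory.getTransactionProvider(
                            TransactionFactory.Provider.valueOf(transactionProvider))
                        .isUnsupported(Feature.ALLOW_LOCAL_INDEX);
        }
    }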


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/50c2a3be
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/50c2a3be
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/50c2a3be

Branch: refs/heads/4.x-cdh5.15
Commit: 50c2a3beee324823bd1a21459b62fbee1e1eca40
Parents: a4453b6
Author: James Taylor 
Authored: Tue Oct 16 17:16:43 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:43 2018 +0100

--
 .../org/apache/phoenix/end2end/IndexToolIT.java | 16 --
 .../StatsEnabledSplitSystemCatalogIT.java   | 34 +++-
 .../java/org/apache/phoenix/end2end/ViewIT.java | 18 +--
 .../phoenix/schema/stats/StatsCollectorIT.java  | 54 
 4 files changed, 82 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/50c2a3be/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
index b8372c4..c99f145 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
@@ -49,6 +49,8 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.index.IndexTool;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.transaction.PhoenixTransactionProvider.Feature;
+import org.apache.phoenix.transaction.TransactionFactory;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -111,12 +113,18 @@ public class IndexToolIT extends ParallelStatsEnabledIT {
 public static Collection<Object[]> data() {
 List<Object[]> list = Lists.newArrayListWithExpectedSize(48);
 boolean[] Booleans = new boolean[] { false, true };
-for (Object transactionProvider : new String[] {"TEPHRA", "OMID", 
null}) {
+for (String transactionProvider : new String[] {"TEPHRA", "OMID", 
null}) {
 for (boolean mutable : Booleans) {
 for (boolean localIndex : Booleans) {
-for (boolean directApi : Booleans) {
-for (boolean useSnapshot : Booleans) {
-list.add(new Object[] { transactionProvider, 
mutable, localIndex, directApi, useSnapshot });
+if (!localIndex 
+|| transactionProvider == null 
+|| !TransactionFactory.getTransactionProvider(
+
TransactionFactory.Provider.valueOf(transactionProvider))
+.isUnsupported(Feature.ALLOW_LOCAL_INDEX)) {
+for (boolean directApi : Booleans) {
+for (boolean useSnapshot : Booleans) {
+list.add(new Object[] { transactionProvider, 
mutable, localIndex, directApi, useSnapshot });
+}
 }
 }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/50c2a3be/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsEnabledSplitSystemCatalogIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsEnabledSplitSystemCatalogIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsEnabledSplitSystemCatalogIT.java
index 197263f..0a0dd21 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsEnabledSplitSystemCatalogIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsEnabledSplitSystemCatalogIT.java
@@ -34,10 +34,10 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -45,6 +45,8 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.ReadOnlyTableException;
+import org.apache.phoenix.transaction.PhoenixTransactionProvider.Feature;
+import 

[26/51] [abbrv] phoenix git commit: PHOENIX-4892 Unable to start load balancer with queryserver

2018-10-17 Thread pboado
PHOENIX-4892 Unable to start load balancer with queryserver

Added phoenix_loadbalancer_jar path to phoenix_utils.py,
added phoenix_loadbalancer_jar to classpath in queryserver.py,
added service for registry in meta-inf

Closes #343

Signed-off-by: Josh Elser 
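
The jar pattern added to phoenix_utils.py uses character-class negation:
[!t][!e][!s][!t][!s] refuses any name whose last five characters before .jar line
up letter-for-letter with "tests", so phoenix-load-balancer-<version>.jar matches
while the -tests jar does not. The same idea in Java's glob flavour, which shares
the [!...] syntax with Python's fnmatch (the file names below are made up):

    import java.nio.file.FileSystems;
    import java.nio.file.PathMatcher;
    import java.nio.file.Paths;

    public class JarPatternSketch {
        public static void main(String[] args) {
            PathMatcher m = FileSystems.getDefault()
                    .getPathMatcher("glob:phoenix-load-balancer-*[!t][!e][!s][!t][!s].jar");
            System.out.println(m.matches(Paths.get("phoenix-load-balancer-4.14.0.jar")));       // true
            System.out.println(m.matches(Paths.get("phoenix-load-balancer-4.14.0-tests.jar"))); // false
        }
    }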


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b8812269
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b8812269
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b8812269

Branch: refs/heads/4.x-cdh5.15
Commit: b881226919006ae6184275a2aefef078be735a94
Parents: 8208c0a
Author: Vitaliy 
Authored: Tue Sep 18 20:05:55 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 bin/phoenix_utils.py  | 7 +++
 bin/queryserver.py| 4 ++--
 phoenix-load-balancer/pom.xml | 1 +
 .../services/org.apache.phoenix.queryserver.register.Registry | 1 +
 4 files changed, 11 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b8812269/bin/phoenix_utils.py
--
diff --git a/bin/phoenix_utils.py b/bin/phoenix_utils.py
index b521277..aa04a5b 100755
--- a/bin/phoenix_utils.py
+++ b/bin/phoenix_utils.py
@@ -71,6 +71,7 @@ def setPath():
 PHOENIX_CLIENT_JAR_PATTERN = "phoenix-*-client.jar"
 PHOENIX_THIN_CLIENT_JAR_PATTERN = "phoenix-*-thin-client.jar"
 PHOENIX_QUERYSERVER_JAR_PATTERN = "phoenix-*-queryserver.jar"
+PHOENIX_LOADBALANCER_JAR_PATTERN = 
"phoenix-load-balancer-*[!t][!e][!s][!t][!s].jar"
 PHOENIX_TRACESERVER_JAR_PATTERN = "phoenix-tracing-webapp-*-runnable.jar"
 PHOENIX_TESTS_JAR_PATTERN = "phoenix-core-*-tests*.jar"
 PHOENIX_PHERF_JAR_PATTERN = "phoenix-pherf-*-minimal*.jar"
@@ -162,6 +163,11 @@ def setPath():
 if phoenix_queryserver_jar == "":
 phoenix_queryserver_jar = 
findFileInPathWithoutRecursion(PHOENIX_QUERYSERVER_JAR_PATTERN, 
os.path.join(current_dir, ".."))
 
+global phoenix_loadbalancer_jar
+phoenix_loadbalancer_jar = find(PHOENIX_LOADBALANCER_JAR_PATTERN, 
os.path.join(current_dir, "..", "phoenix-loadbalancer", "target", "*"))
+if phoenix_loadbalancer_jar == "":
+phoenix_loadbalancer_jar = 
findFileInPathWithoutRecursion(PHOENIX_LOADBALANCER_JAR_PATTERN, 
os.path.join(current_dir, ".."))
+
 global phoenix_traceserver_jar
 phoenix_traceserver_jar = find(PHOENIX_TRACESERVER_JAR_PATTERN, 
os.path.join(current_dir, "..", "phoenix-tracing-webapp", "target", "*"))
 if phoenix_traceserver_jar == "":
@@ -218,5 +224,6 @@ if __name__ == "__main__":
 print "hadoop_hdfs_jar:", hadoop_hdfs_jar
 print "testjar:", testjar
 print "phoenix_queryserver_jar:", phoenix_queryserver_jar
+print "phoenix_loadbalancer_jar:", phoenix_loadbalancer_jar
 print "phoenix_thin_client_jar:", phoenix_thin_client_jar
 print "hadoop_classpath:", hadoop_classpath 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b8812269/bin/queryserver.py
--
diff --git a/bin/queryserver.py b/bin/queryserver.py
index 711bcc4..0c07b3b 100755
--- a/bin/queryserver.py
+++ b/bin/queryserver.py
@@ -120,8 +120,8 @@ out_file_path = os.path.join(log_dir, phoenix_out_file)
 
 # The command is run through subprocess so environment variables are 
automatically inherited
 java_cmd = '%(java)s -cp ' + hbase_config_path + os.pathsep + 
hadoop_config_path + os.pathsep + \
-phoenix_utils.phoenix_client_jar + os.pathsep + 
phoenix_utils.phoenix_queryserver_jar + \
-os.pathsep + hadoop_classpath + \
+phoenix_utils.phoenix_client_jar + os.pathsep + 
phoenix_utils.phoenix_loadbalancer_jar + \
+os.pathsep + phoenix_utils.phoenix_queryserver_jar + os.pathsep + 
hadoop_classpath + \
 " -Dproc_phoenixserver" + \
 " -Dlog4j.configuration=file:" + os.path.join(phoenix_utils.current_dir, 
"log4j.properties") + \
 " -Dpsql.root.logger=%(root_logger)s" + \

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b8812269/phoenix-load-balancer/pom.xml
--
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index 68ae9a8..a8319e9 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -75,6 +75,7 @@
 
   
 
src/main/resources/META-INF/services/org.apache.phoenix.loadbalancer.service.LoadBalanceZookeeperConf
+
src/main/resources/META-INF/services/org.apache.phoenix.queryserver.register.Registry
   
 
   


[05/51] [abbrv] phoenix git commit: PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface for query level metrics logging

2018-10-17 Thread pboado
PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface for query level 
metrics logging
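
After this change every query-level callback receives the SQL text that produced
the metrics. A hedged sketch of a client-side implementation of the updated
interface (the generic parameters are reconstructed because the archive stripped
them; delegate, LOG and the imports are assumed, and the mutation callbacks are
elided but must be implemented too):

    LoggingPhoenixConnection conn = new LoggingPhoenixConnection(delegate,
            new PhoenixMetricsLog() {
        @Override
        public void logOverAllReadRequestMetrics(
                Map<MetricType, Long> overAllQueryMetrics, String sql) {
            LOG.info("overall read metrics for [" + sql + "]: " + overAllQueryMetrics);
        }

        @Override
        public void logRequestReadMetrics(
                Map<String, Map<MetricType, Long>> requestReadMetrics, String sql) {
            LOG.info("per-table read metrics for [" + sql + "]: " + requestReadMetrics);
        }

        // logWriteMetricsfoForMutationsSinceLastReset(...) and the mutation read
        // metrics callback omitted for brevity.
    });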


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9d07afa4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9d07afa4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9d07afa4

Branch: refs/heads/4.x-cdh5.15
Commit: 9d07afa44fc49fc6b6c60cf6194b12e1f325d667
Parents: e52fac1
Author: Karan Mehta 
Authored: Fri Aug 17 21:02:08 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../monitoring/PhoenixLoggingMetricsIT.java | 15 +---
 .../phoenix/jdbc/LoggingPhoenixConnection.java  | 16 ++---
 .../jdbc/LoggingPhoenixPreparedStatement.java   | 13 +++
 .../phoenix/jdbc/LoggingPhoenixResultSet.java   | 10 
 .../phoenix/jdbc/LoggingPhoenixStatement.java   | 24 
 .../apache/phoenix/jdbc/PhoenixMetricsLog.java  |  6 ++---
 6 files changed, 56 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9d07afa4/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 02640e7..97b2c5d 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -43,6 +43,7 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 private String tableName1;
 private String tableName2;
 private LoggingPhoenixConnection loggedConn;
+private String loggedSql;
 
 @Before
 public void beforeTest() throws Exception {
@@ -75,7 +76,10 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 rs.close();
 assertTrue("Read metrics for not found for " + tableName1,
 requestReadMetricsMap.get(tableName1).size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+
 assertTrue("Overall read metrics for not found ", 
overAllQueryMetricsMap.size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -120,7 +124,10 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 rs.close();
 assertTrue("Read metrics for not found for " + tableName1,
 requestReadMetricsMap.get(tableName1).size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+
 assertTrue("Overall read metrics for not found ", 
overAllQueryMetricsMap.size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -155,18 +162,20 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 return new LoggingPhoenixConnection(conn, new PhoenixMetricsLog() {
 @Override
 public void logOverAllReadRequestMetrics(
-Map<MetricType, Long> overAllQueryMetrics) {
+Map<MetricType, Long> overAllQueryMetrics, String sql) {
 overAllQueryMetricsMap.putAll(overAllQueryMetrics);
+loggedSql = sql;
 }
 
 @Override
 public void logRequestReadMetrics(
-Map<String, Map<MetricType, Long>> requestReadMetrics) {
+Map<String, Map<MetricType, Long>> requestReadMetrics, String sql) {
 requestReadMetricsMap.putAll(requestReadMetrics);
+loggedSql = sql;
 }
 
 @Override
-public void logWriteMetricsfoForMutations(
+public void logWriteMetricsfoForMutationsSinceLastReset(
 Map<String, Map<MetricType, Long>> mutationWriteMetrics) {
 mutationWriteMetricsMap.putAll(mutationWriteMetrics);
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9d07afa4/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
index 9a2e00f..37917e2 100644
--- 

[36/51] [abbrv] phoenix git commit: PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

2018-10-17 Thread pboado
PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

Includes updated LICENSE for requests-kerberos. Tries to detect when the
host system doesn't have necessary dependencies to run the test

Closes #344

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3cac9217
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3cac9217
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3cac9217

Branch: refs/heads/4.x-cdh5.15
Commit: 3cac9217505df765df3595a0ae4509e09c7bab2a
Parents: d56d03e
Author: Lev Bronshtein 
Authored: Wed Aug 29 22:19:51 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:50:41 2018 +0100

--
 LICENSE |   18 +
 NOTICE  |2 +
 dev/release_files/LICENSE   |   18 +
 dev/release_files/NOTICE|4 +
 .../src/it/bin/test_phoenixdb.py|   39 +
 .../src/it/bin/test_phoenixdb.sh|   79 +
 .../end2end/SecureQueryServerPhoenixDBIT.java   |  457 +
 pom.xml |   14 +-
 python/NEWS.rst |   44 -
 python/README.md|   93 +
 python/README.rst   |  136 --
 python/RELEASING.rst|   12 -
 python/ci/build-env/Dockerfile  |7 -
 python/ci/phoenix/Dockerfile|   33 -
 python/ci/phoenix/docker-entrypoint.sh  |   24 -
 python/ci/phoenix/hbase-site.xml|   12 -
 python/doc/Makefile |  192 --
 python/doc/api.rst  |   30 -
 python/doc/conf.py  |  287 ---
 python/doc/index.rst|   27 -
 python/doc/versions.rst |3 -
 python/docker-compose.yml   |   21 -
 python/examples/basic.py|   27 -
 python/examples/shell.py|   33 -
 python/gen-protobuf.sh  |   38 -
 python/phoenixdb/NEWS.rst   |   44 +
 python/phoenixdb/README.rst |  136 ++
 python/phoenixdb/RELEASING.rst  |   12 +
 python/phoenixdb/__init__.py|   68 -
 python/phoenixdb/avatica/__init__.py|   16 -
 python/phoenixdb/avatica/client.py  |  510 --
 python/phoenixdb/avatica/proto/__init__.py  |0
 python/phoenixdb/avatica/proto/common_pb2.py| 1667 --
 python/phoenixdb/avatica/proto/requests_pb2.py  | 1206 -
 python/phoenixdb/avatica/proto/responses_pb2.py |  917 --
 python/phoenixdb/ci/build-env/Dockerfile|7 +
 python/phoenixdb/ci/phoenix/Dockerfile  |   33 +
 .../phoenixdb/ci/phoenix/docker-entrypoint.sh   |   24 +
 python/phoenixdb/ci/phoenix/hbase-site.xml  |   12 +
 python/phoenixdb/connection.py  |  187 --
 python/phoenixdb/cursor.py  |  347 
 python/phoenixdb/doc/Makefile   |  192 ++
 python/phoenixdb/doc/api.rst|   30 +
 python/phoenixdb/doc/conf.py|  287 +++
 python/phoenixdb/doc/index.rst  |   27 +
 python/phoenixdb/doc/versions.rst   |3 +
 python/phoenixdb/docker-compose.yml |   21 +
 python/phoenixdb/errors.py  |   93 -
 python/phoenixdb/examples/basic.py  |   27 +
 python/phoenixdb/examples/shell.py  |   33 +
 python/phoenixdb/gen-protobuf.sh|   39 +
 python/phoenixdb/phoenixdb/__init__.py  |   72 +
 python/phoenixdb/phoenixdb/avatica/__init__.py  |   16 +
 python/phoenixdb/phoenixdb/avatica/client.py|  502 ++
 .../phoenixdb/avatica/proto/__init__.py |0
 .../phoenixdb/avatica/proto/common_pb2.py   | 1667 ++
 .../phoenixdb/avatica/proto/requests_pb2.py | 1206 +
 .../phoenixdb/avatica/proto/responses_pb2.py|  917 ++
 python/phoenixdb/phoenixdb/connection.py|  187 ++
 python/phoenixdb/phoenixdb/cursor.py|  347 
 python/phoenixdb/phoenixdb/errors.py|   93 +
 python/phoenixdb/phoenixdb/tests/__init__.py|   44 +
 python/phoenixdb/phoenixdb/tests/dbapi20.py |  857 +
 .../phoenixdb/phoenixdb/tests/test_avatica.py   |   25 +
 .../phoenixdb/tests/test_connection.py  |   42 +
 python/phoenixdb/phoenixdb/tests/test_db.py |   99 ++
 .../phoenixdb/phoenixdb/tests/test_dbapi20.py   |  122 ++
 python/phoenixdb/phoenixdb/tests/test_errors.py |   60 +
 python/phoenixdb/phoenixdb/tests/test_types.py  |  327 
 python/phoenixdb/phoenixdb/types.py |  202 +++
 

[09/51] [abbrv] phoenix git commit: PHOENIX-4903 Use same hash cache RPC message across all calls

2018-10-17 Thread pboado
PHOENIX-4903 Use same hash cache RPC message across all calls

Signed-off-by: Josh Elser 
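
The refactor hoists construction of the AddServerCacheRequest out of the
per-region Batch.Call, so one immutable protobuf message is built once and reused
by every region invocation instead of being rebuilt (tenant id derivation and all)
per call. The shape of the change, reduced to a runnable toy (all names here are
illustrative):

    import java.util.Arrays;

    public class HoistRequestSketch {
        static final class Request {               // stands in for AddServerCacheRequest
            final String tenantId;
            Request(String tenantId) { this.tenantId = tenantId; }
        }

        interface RegionCall { String call(Request request); }

        public static void main(String[] args) {
            // Build once, outside the callback; every region closes over the same instance.
            final Request request = new Request("tenant1");
            RegionCall send = r -> "sent cache for " + r.tenantId;
            for (String region : Arrays.asList("r1", "r2", "r3")) {
                System.out.println(region + ": " + send.call(request));
            }
        }
    }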


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1c65cdc8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1c65cdc8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1c65cdc8

Branch: refs/heads/4.x-cdh5.15
Commit: 1c65cdc842fb9729dcaf9f43e525ce5e760120a3
Parents: ae2fbe6
Author: Marcell Ortutay 
Authored: Sun Sep 16 05:13:54 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../apache/phoenix/cache/ServerCacheClient.java | 61 ++--
 1 file changed, 32 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1c65cdc8/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index 011a6f8..93d16f5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -486,6 +486,37 @@ public class ServerCacheClient {
 throws Exception {
 byte[] keyInRegion = getKeyInRegion(key);
 final Map<byte[], AddServerCacheResponse> results;
+
+AddServerCacheRequest.Builder builder = 
AddServerCacheRequest.newBuilder();
+final byte[] tenantIdBytes;
+if (cacheUsingTable.isMultiTenant()) {
+try {
+tenantIdBytes = connection.getTenantId() == null ? null
+: 
ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
+cacheUsingTable.getBucketNum() != null, 
connection.getTenantId(),
+cacheUsingTable.getViewIndexId() != null);
+} catch (SQLException e) {
+throw new IOException(e);
+}
+} else {
+tenantIdBytes = connection.getTenantId() == null ? null
+: connection.getTenantId().getBytes();
+}
+if (tenantIdBytes != null) {
+builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
+}
+builder.setCacheId(ByteStringer.wrap(cacheId));
+builder.setUsePersistentCache(usePersistentCache);
+
builder.setCachePtr(org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
+builder.setHasProtoBufIndexMaintainer(true);
+ServerCacheFactoryProtos.ServerCacheFactory.Builder 
svrCacheFactoryBuider = ServerCacheFactoryProtos.ServerCacheFactory
+.newBuilder();
+svrCacheFactoryBuider.setClassName(cacheFactory.getClass().getName());
+builder.setCacheFactory(svrCacheFactoryBuider.build());
+builder.setTxState(ByteStringer.wrap(txState));
+builder.setClientVersion(MetaDataProtocol.PHOENIX_VERSION);
+final AddServerCacheRequest request = builder.build();
+
 try {
 results = htable.coprocessorService(ServerCachingService.class, 
keyInRegion, keyInRegion,
 new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
@@ -493,35 +524,7 @@ public class ServerCacheClient {
 public AddServerCacheResponse 
call(ServerCachingService instance) throws IOException {
 ServerRpcController controller = new 
ServerRpcController();
 BlockingRpcCallback<AddServerCacheResponse> rpcCallback = new BlockingRpcCallback<AddServerCacheResponse>();
-AddServerCacheRequest.Builder builder = 
AddServerCacheRequest.newBuilder();
-final byte[] tenantIdBytes;
-if (cacheUsingTable.isMultiTenant()) {
-try {
-tenantIdBytes = connection.getTenantId() 
== null ? null
-: 
ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
-
cacheUsingTable.getBucketNum() != null, connection.getTenantId(),
-
cacheUsingTable.getViewIndexId() != null);
-} catch (SQLException e) {
-throw new IOException(e);
-}
-} else {
-tenantIdBytes = connection.getTenantId() == 
null ? null
-: connection.getTenantId().getBytes();
-}
-if (tenantIdBytes != null) {
-
builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
-   

[06/51] [abbrv] phoenix git commit: PHOENIX-4894 Missing Apache license in two QueryServer classes

2018-10-17 Thread pboado
PHOENIX-4894 Missing Apache license in two QueryServer classes


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/912215cb
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/912215cb
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/912215cb

Branch: refs/heads/4.x-cdh5.15
Commit: 912215cb7adb1a88ae176dca07018ebd2f619c50
Parents: 6ea2110
Author: Karan Mehta 
Authored: Fri Sep 7 18:30:57 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../server/AvaticaServerConfigurationFactory.java  | 17 +
 .../CustomAvaticaServerConfigurationTest.java  | 17 +
 2 files changed, 34 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/912215cb/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
--
diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
index 87a72ea..33fd590 100644
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
+++ b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.queryserver.server;
 
 import org.apache.calcite.avatica.server.AvaticaServerConfiguration;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/912215cb/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
--
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
index 20bc868..fb59e0d 100644
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
+++ b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.queryserver.server;
 
 import org.apache.calcite.avatica.server.AvaticaServerConfiguration;



[35/51] [abbrv] phoenix git commit: PHOENIX-4688 Support SPNEGO for python driver via requests-kerberos

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/phoenixdb/__init__.py
--
diff --git a/python/phoenixdb/__init__.py b/python/phoenixdb/__init__.py
deleted file mode 100644
index ae7dd39..000
--- a/python/phoenixdb/__init__.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from phoenixdb import errors, types
-from phoenixdb.avatica import AvaticaClient
-from phoenixdb.connection import Connection
-from phoenixdb.errors import *  # noqa: F401,F403
-from phoenixdb.types import *  # noqa: F401,F403
-
-__all__ = ['connect', 'apilevel', 'threadsafety', 'paramstyle'] + types.__all__ + errors.__all__
-
-
-apilevel = "2.0"
-"""
-This module supports the `DB API 2.0 interface `_.
-"""
-
-threadsafety = 1
-"""
-Multiple threads can share the module, but neither connections nor cursors.
-"""
-
-paramstyle = 'qmark'
-"""
-Parametrized queries should use the question mark as a parameter placeholder.
-
-For example::
-
- cursor.execute("SELECT * FROM table WHERE id = ?", [my_id])
-"""
-
-
-def connect(url, max_retries=None, **kwargs):
-"""Connects to a Phoenix query server.
-
-:param url:
-URL to the Phoenix query server, e.g. ``http://localhost:8765/``
-
-:param autocommit:
-Switch the connection to autocommit mode.
-
-:param readonly:
-Switch the connection to readonly mode.
-
-:param max_retries:
-The maximum number of retries in case there is a connection error.
-
-:param cursor_factory:
-If specified, the connection's :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it.
-
-:returns:
-:class:`~phoenixdb.connection.Connection` object.
-"""
-client = AvaticaClient(url, max_retries=max_retries)
-client.connect()
-return Connection(client, **kwargs)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/phoenixdb/avatica/__init__.py
--
diff --git a/python/phoenixdb/avatica/__init__.py b/python/phoenixdb/avatica/__init__.py
deleted file mode 100644
index 53776d7..000
--- a/python/phoenixdb/avatica/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .client import AvaticaClient  # noqa: F401

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3cac9217/python/phoenixdb/avatica/client.py
--
diff --git a/python/phoenixdb/avatica/client.py b/python/phoenixdb/avatica/client.py
deleted file mode 100644
index ea00631..000
--- a/python/phoenixdb/avatica/client.py
+++ /dev/null
@@ -1,510 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the 

[19/51] [abbrv] phoenix git commit: PHOENIX-4855 Continue to write base table column metadata when creating a view in order to support rollback

2018-10-17 Thread pboado
PHOENIX-4855 Continue to write base table column metadata when creating a view 
in order to support rollback


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a6c1aa45
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a6c1aa45
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a6c1aa45

Branch: refs/heads/4.x-cdh5.15
Commit: a6c1aa4531ab57e21174b9c208c3b40e4e845bd5
Parents: adbd986
Author: Thomas D'Silva 
Authored: Mon Aug 20 18:42:56 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../AlterMultiTenantTableWithViewsIT.java   |  25 ++-
 .../phoenix/end2end/AlterTableWithViewsIT.java  | 196 ++---
 .../java/org/apache/phoenix/end2end/ViewIT.java |  14 +-
 .../coprocessor/MetaDataEndpointImpl.java   | 208 +++
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   | 158 +++---
 .../apache/phoenix/schema/DelegateTable.java|   5 +
 .../apache/phoenix/schema/MetaDataClient.java   |  38 ++--
 .../org/apache/phoenix/schema/PColumnImpl.java  |   3 +-
 .../java/org/apache/phoenix/schema/PTable.java  |   6 +
 .../org/apache/phoenix/schema/PTableImpl.java   |  38 ++--
 10 files changed, 461 insertions(+), 230 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a6c1aa45/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
index 669b6f6..d5e1af2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
@@ -498,20 +498,24 @@ public class AlterMultiTenantTableWithViewsIT extends SplitSystemCatalogIT {
         String tenant = TENANT1;
         try (Connection conn = DriverManager.getConnection(getUrl());
                 Connection tenant1Conn = getTenantConnection(tenant)) {
-            String baseTableDDL = "CREATE TABLE " + baseTable + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR, V3 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true ";
+            String baseTableDDL =
+                    "CREATE TABLE " + baseTable
+                            + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR, "
+                            + "V2 VARCHAR, V3 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1))"
+                            + " MULTI_TENANT = true, SALT_BUCKETS = 4";
             conn.createStatement().execute(baseTableDDL);
 
             String view1DDL = "CREATE VIEW " + view1 + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 CHAR(256)) AS SELECT * FROM " + baseTable;
             tenant1Conn.createStatement().execute(view1DDL);
 
-            assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 1, 5, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V2", "V3");
-            assertTableDefinition(tenant1Conn, view1, PTableType.VIEW, baseTable, 0, 7, 5,  "PK1", "V1", "V2", "V3", "VIEW_COL1", "VIEW_COL2");
+            assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 1, 6, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V2", "V3");
+            assertTableDefinition(tenant1Conn, view1, PTableType.VIEW, baseTable, 0, 8, 6,  "PK1", "V1", "V2", "V3", "VIEW_COL1", "VIEW_COL2");
 
             String alterBaseTable = "ALTER TABLE " + baseTable + " ADD KV VARCHAR, PK2 VARCHAR PRIMARY KEY";
             conn.createStatement().execute(alterBaseTable);
 
             assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 2, 7, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V2", "V3", "KV", "PK2");
-            assertTableDefinition(tenant1Conn, view1, PTableType.VIEW, baseTable, 0, 7, 5,  "PK1", "V1", "V2", "V3", "VIEW_COL1", "VIEW_COL2");
+            assertTableDefinition(tenant1Conn, view1, PTableType.VIEW, baseTable, 0, 8, 6,  "PK1", "V1", "V2", "V3", "KV", "PK2", "VIEW_COL1", "VIEW_COL2");
 
             // verify that the both columns were added to view1
             tenant1Conn.createStatement().execute("SELECT KV from " + view1);
@@ -526,21 +530,24 @@ public class AlterMultiTenantTableWithViewsIT extends SplitSystemCatalogIT {
         String tenant = TENANT1;
         try (Connection conn = DriverManager.getConnection(getUrl());
                 Connection tenant1Conn = getTenantConnection(tenant)) {
-            String baseTableDDL = "CREATE TABLE " + baseTable + " (TENANT_ID VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 

[23/51] [abbrv] phoenix git commit: PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException (Aman Poonia)

2018-10-17 Thread pboado
PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException 
(Aman Poonia)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ae2fbe65
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ae2fbe65
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ae2fbe65

Branch: refs/heads/4.x-cdh5.15
Commit: ae2fbe6525509421ae8d77378fd98e57fa492db0
Parents: e83c614
Author: Lars Hofhansl 
Authored: Fri Sep 14 20:40:06 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../regionserver/IndexHalfStoreFileReader.java  | 48 
 .../IndexHalfStoreFileReaderGenerator.java  | 12 ++---
 2 files changed, 43 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ae2fbe65/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
--
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 8bd0d72..273a1b0 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_START_ROW_SUFFIX;
+
 import java.io.IOException;
 import java.util.Map;
 
@@ -26,10 +28,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.index.IndexMaintainer;
 
 /**
@@ -56,8 +60,9 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     private final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers;
     private final byte[][] viewConstants;
     private final int offset;
-    private final HRegionInfo regionInfo;
+    private final HRegionInfo childRegionInfo;
     private final byte[] regionStartKeyInHFile;
+    private final HRegionInfo currentRegion;
 
     /**
      * @param fs
@@ -69,17 +74,19 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
      * @param conf
      * @param indexMaintainers
      * @param viewConstants
-     * @param regionInfo
+     * @param childRegionInfo
      * @param regionStartKeyInHFile
      * @param splitKey
+     * @param currentRegion
      * @throws IOException
      */
     public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
             final FSDataInputStreamWrapper in, long size, final Reference r,
             final Configuration conf,
             final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
-            final byte[][] viewConstants, final HRegionInfo regionInfo,
-            byte[] regionStartKeyInHFile, byte[] splitKey) throws IOException {
+            final byte[][] viewConstants, final HRegionInfo childRegionInfo,
+            byte[] regionStartKeyInHFile, byte[] splitKey, HRegionInfo currentRegion)
+            throws IOException {
         super(fs, p, in, size, cacheConf, conf);
         this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
         // Is it top or bottom half?
@@ -87,9 +94,10 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
         this.splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(splitkey));
         this.indexMaintainers = indexMaintainers;
         this.viewConstants = viewConstants;
-        this.regionInfo = regionInfo;
+        this.childRegionInfo = childRegionInfo;
         this.regionStartKeyInHFile = regionStartKeyInHFile;
         this.offset = regionStartKeyInHFile.length;
+        this.currentRegion = currentRegion;
     }
 
     public int getOffset() {
@@ -105,7 +113,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     }
 
     public HRegionInfo getRegionInfo() {
-        return regionInfo;
+        return childRegionInfo;
     }
 
     public byte[] getRegionStartKeyInHFile() {
@@ -125,8 +133,30 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     }
 
     @Override
-    public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt) {
-        return new LocalIndexStoreFileScanner(this, 

[49/51] [abbrv] phoenix git commit: PHOENIX-3955: Ensure KEEP_DELETED_CELLS, REPLICATION_SCOPE, and TTL properties stay in sync between the physical data table and index tables

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eb13ffd8/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index bfa332c..37cdd21 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -18,6 +18,8 @@
 package org.apache.phoenix.query;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.hadoop.hbase.HColumnDescriptor.TTL;
+import static org.apache.hadoop.hbase.HColumnDescriptor.REPLICATION_SCOPE;
+import static org.apache.hadoop.hbase.HColumnDescriptor.KEEP_DELETED_CELLS;
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MAJOR_VERSION;
 import static org.apache.phoenix.coprocessor.MetaDataProtocol.PHOENIX_MINOR_VERSION;
@@ -62,6 +64,7 @@ import static org.apache.phoenix.util.UpgradeUtil.addViewIndexToParentLinks;
 import static org.apache.phoenix.util.UpgradeUtil.getSysCatalogSnapshotName;
 import static org.apache.phoenix.util.UpgradeUtil.moveChildLinks;
 import static org.apache.phoenix.util.UpgradeUtil.upgradeTo4_5_0;
+import static org.apache.phoenix.util.UpgradeUtil.syncTableAndIndexProperties;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -101,11 +104,13 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.annotation.concurrent.GuardedBy;
 
+import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.TableExistsException;
@@ -775,62 +780,94 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             PTableType tableType, Map<String, Object> tableProps, List<Pair<byte[], Map<String, Object>>> families,
             byte[][] splits, boolean isNamespaceMapped) throws SQLException {
         String defaultFamilyName = (String)tableProps.remove(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME);
-        HTableDescriptor tableDescriptor = (existingDesc != null) ? new HTableDescriptor(existingDesc)
-                : new HTableDescriptor(physicalTableName);
+        HTableDescriptor newTableDescriptor = (existingDesc != null) ? new HTableDescriptor(existingDesc)
+                : new HTableDescriptor(TableName.valueOf(physicalTableName));
+
+        HColumnDescriptor dataTableColDescForIndexTablePropSyncing = null;
+        if (tableType == PTableType.INDEX || MetaDataUtil.isViewIndex(Bytes.toString(physicalTableName))) {
+            byte[] defaultFamilyBytes =
+                    defaultFamilyName == null ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : Bytes.toBytes(defaultFamilyName);
+
+            final HTableDescriptor baseTableDesc;
+            if (MetaDataUtil.isViewIndex(Bytes.toString(physicalTableName))) {
+                // Handles indexes created on views for single-tenant tables and
+                // global indexes created on views of multi-tenant tables
+                baseTableDesc = this.getTableDescriptor(Bytes.toBytes(MetaDataUtil.getViewIndexUserTableName(Bytes.toString(physicalTableName))));
+            } else if (existingDesc == null) {
+                // Global/local index creation on top of a physical base table
+                baseTableDesc = this.getTableDescriptor(SchemaUtil.getPhysicalTableName(
+                        Bytes.toBytes((String) tableProps.get(PhoenixDatabaseMetaData.DATA_TABLE_NAME)), isNamespaceMapped)
+                        .getName());
+            } else {
+                // In case this is a local index created on a view of a multi-tenant table, the
+                // DATA_TABLE_NAME points to the name of the view instead of the physical base table
+                baseTableDesc = existingDesc;
+            }
+            dataTableColDescForIndexTablePropSyncing = baseTableDesc.getFamily(defaultFamilyBytes);
+            // It's possible that the table has specific column families and none of them are declared
+            // to be the DEFAULT_COLUMN_FAMILY, so we choose the first column family for syncing properties
+            if (dataTableColDescForIndexTablePropSyncing == null) {
+                dataTableColDescForIndexTablePropSyncing = baseTableDesc.getColumnFamilies()[0];
+            }
+        }
         // By 

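The new block above picks a data-table HColumnDescriptor (the DEFAULT_COLUMN_FAMILY if declared, otherwise the first family) to drive property syncing on index tables. A rough sketch of the syncing step itself, assuming HBase 1.x descriptors; the helper name is ours, not Phoenix's:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;

final class PropertySyncSketch {
    // Copy the three synced properties (TTL, KEEP_DELETED_CELLS,
    // REPLICATION_SCOPE) from the chosen data-table column family onto
    // every family of an index table descriptor.
    static void syncIndexProperties(HColumnDescriptor dataTableColDesc,
                                    HTableDescriptor indexTableDesc) {
        for (HColumnDescriptor indexColDesc : indexTableDesc.getColumnFamilies()) {
            indexColDesc.setTTL(dataTableColDesc.getTimeToLive());
            indexColDesc.setKeepDeletedCells(dataTableColDesc.getKeepDeletedCells());
            indexColDesc.setScope(dataTableColDesc.getScope());
        }
    }
}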
[14/51] [abbrv] phoenix git commit: PHOENIX-3178 Row count incorrect for UPSERT SELECT when auto commit is false

2018-10-17 Thread pboado
PHOENIX-3178 Row count incorrect for UPSERT SELECT when auto commit is false


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d2185050
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d2185050
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d2185050

Branch: refs/heads/4.x-cdh5.15
Commit: d218505026a35d4575a51bb5ca7a45a0311e19b4
Parents: edd00bd
Author: s.kadam 
Authored: Tue Aug 28 23:44:56 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../end2end/UpsertSelectAutoCommitIT.java   | 31 ++--
 .../apache/phoenix/compile/UpsertCompiler.java  | 10 +--
 2 files changed, 37 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d2185050/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
index 38d48d6..3966f15 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
@@ -151,8 +151,7 @@ public class UpsertSelectAutoCommitIT extends ParallelStatsDisabledIT {
         stmt.executeUpdate();
         conn.commit();
     }
-
-
+
     @Test
     public void testUpsertSelectDoesntSeeUpsertedData() throws Exception {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
@@ -201,4 +200,32 @@ public class UpsertSelectAutoCommitIT extends ParallelStatsDisabledIT {
         connection.close();
     }
 
+    @Test
+    public void testRowCountWithNoAutoCommitOnUpsertSelect() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        props.setProperty(QueryServices.MUTATE_BATCH_SIZE_ATTRIB, Integer.toString(3));
+        props.setProperty(QueryServices.SCAN_CACHE_SIZE_ATTRIB, Integer.toString(3));
+        props.setProperty(QueryServices.SCAN_RESULT_CHUNK_SIZE, Integer.toString(3));
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+        String tableName = generateUniqueName();
+
+        conn.createStatement().execute("CREATE SEQUENCE " + tableName);
+        conn.createStatement().execute(
+            "CREATE TABLE " + tableName + " (pk INTEGER PRIMARY KEY, val INTEGER)");
+
+        conn.createStatement().execute(
+            "UPSERT INTO " + tableName + " VALUES (NEXT VALUE FOR keys,1)");
+        conn.commit();
+        for (int i = 0; i < 6; i++) {
+            Statement stmt = conn.createStatement();
+            int upsertCount = stmt.executeUpdate(
+                "UPSERT INTO " + tableName + " SELECT NEXT VALUE FOR keys, val FROM "
+                        + tableName);
+            conn.commit();
+            assertEquals((int) Math.pow(2, i), upsertCount);
+        }
+        conn.close();
+    }
+
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d2185050/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index 9d75bba..d0dd2cf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -185,6 +185,7 @@ public class UpsertCompiler {
                 QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
         int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
         boolean isAutoCommit = connection.getAutoCommit();
+        int sizeOffset = 0;
         int numSplColumns =
                 (tableRef.getTable().isMultiTenant() ? 1 : 0)
                         + (tableRef.getTable().getViewIndexId() != null ? 1 : 0);
@@ -249,8 +250,13 @@ public class UpsertCompiler {
                 mutation.clear();
             }
         }
-        // If auto commit is true, this last batch will be committed upon return
-        return new MutationState(tableRef, mutation, rowCount / batchSize * batchSize, maxSize, maxSizeBytes, connection);
+
+        if (isAutoCommit) {
+            // If auto commit is true, this last batch will be committed upon return
+            sizeOffset = rowCount / batchSize * batchSize;
+        }
+        return new MutationState(tableRef, mutation, sizeOffset, maxSize,
+

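The fix above only discounts already-flushed batches from the reported row count when auto commit is on; with auto commit off nothing was flushed inside the loop, so the offset must stay zero. A small self-contained sketch of the arithmetic (values and class name are ours):

final class SizeOffsetSketch {
    // Mirrors the patched sizeOffset logic in UpsertCompiler, simplified.
    static int sizeOffset(int rowCount, int batchSize, boolean isAutoCommit) {
        // Only with auto commit on were full batches already committed in the
        // loop; the last partial batch is committed upon return.
        return isAutoCommit ? rowCount / batchSize * batchSize : 0;
    }

    public static void main(String[] args) {
        // batchSize = 3, 7 rows: auto commit on gives offset 6 (two full
        // batches already flushed); auto commit off gives offset 0, so all
        // rows are counted exactly once, which is what PHOENIX-3178 restores.
        System.out.println(sizeOffset(7, 3, true));   // 6
        System.out.println(sizeOffset(7, 3, false));  // 0
    }
}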
[01/51] [abbrv] phoenix git commit: PHOENIX-3547 Supporting more number of indices per table. [Forced Update!]

2018-10-17 Thread pboado
Repository: phoenix
Updated Branches:
  refs/heads/4.x-cdh5.15 d1e82d19e -> 6b877d21b (forced update)


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1fcf43c6/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
index ab61826..938ae1f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
@@ -3495,15 +3495,15 @@ public final class PTableProtos {
  */
 com.google.protobuf.ByteString getTenantId();
 
-// optional int32 viewIndexId = 21;
+// optional int64 viewIndexId = 21;
 /**
- * optional int32 viewIndexId = 21;
+ * optional int64 viewIndexId = 21;
  */
 boolean hasViewIndexId();
 /**
- * optional int32 viewIndexId = 21;
+ * optional int64 viewIndexId = 21;
  */
-int getViewIndexId();
+long getViewIndexId();
 
 // optional bytes indexType = 22;
 /**
@@ -3694,6 +3694,16 @@ public final class PTableProtos {
  * optional int32 transactionProvider = 38;
  */
 int getTransactionProvider();
+
+// optional int32 viewIndexType = 39 [default = 5];
+/**
+ * optional int32 viewIndexType = 39 [default = 5];
+ */
+boolean hasViewIndexType();
+/**
+ * optional int32 viewIndexType = 39 [default = 5];
+ */
+int getViewIndexType();
   }
   /**
* Protobuf type {@code PTable}
@@ -3859,7 +3869,7 @@ public final class PTableProtos {
 }
 case 168: {
   bitField0_ |= 0x0001;
-  viewIndexId_ = input.readInt32();
+  viewIndexId_ = input.readInt64();
   break;
 }
 case 178: {
@@ -3950,6 +3960,11 @@ public final class PTableProtos {
   transactionProvider_ = input.readInt32();
   break;
 }
+case 312: {
+  bitField1_ |= 0x0002;
+  viewIndexType_ = input.readInt32();
+  break;
+}
   }
 }
   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4395,19 +4410,19 @@ public final class PTableProtos {
   return tenantId_;
 }
 
-// optional int32 viewIndexId = 21;
+// optional int64 viewIndexId = 21;
 public static final int VIEWINDEXID_FIELD_NUMBER = 21;
-private int viewIndexId_;
+private long viewIndexId_;
 /**
- * optional int32 viewIndexId = 21;
+ * optional int64 viewIndexId = 21;
  */
 public boolean hasViewIndexId() {
   return ((bitField0_ & 0x0001) == 0x0001);
 }
 /**
- * optional int32 viewIndexId = 21;
+ * optional int64 viewIndexId = 21;
  */
-public int getViewIndexId() {
+public long getViewIndexId() {
   return viewIndexId_;
 }
 
@@ -4730,6 +4745,22 @@ public final class PTableProtos {
   return transactionProvider_;
 }
 
+// optional int32 viewIndexType = 39 [default = 5];
+public static final int VIEWINDEXTYPE_FIELD_NUMBER = 39;
+private int viewIndexType_;
+/**
+ * optional int32 viewIndexType = 39 [default = 5];
+ */
+public boolean hasViewIndexType() {
+  return ((bitField1_ & 0x0002) == 0x0002);
+}
+/**
+ * optional int32 viewIndexType = 39 [default = 5];
+ */
+public int getViewIndexType() {
+  return viewIndexType_;
+}
+
 private void initFields() {
   schemaNameBytes_ = com.google.protobuf.ByteString.EMPTY;
   tableNameBytes_ = com.google.protobuf.ByteString.EMPTY;
@@ -4750,7 +4781,7 @@ public final class PTableProtos {
   viewStatement_ = com.google.protobuf.ByteString.EMPTY;
   physicalNames_ = java.util.Collections.emptyList();
   tenantId_ = com.google.protobuf.ByteString.EMPTY;
-  viewIndexId_ = 0;
+  viewIndexId_ = 0L;
   indexType_ = com.google.protobuf.ByteString.EMPTY;
   statsTimeStamp_ = 0L;
   storeNulls_ = false;
@@ -4768,6 +4799,7 @@ public final class PTableProtos {
   encodedCQCounters_ = java.util.Collections.emptyList();
   useStatsForParallelization_ = false;
   transactionProvider_ = 0;
+  viewIndexType_ = 5;
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -4893,7 +4925,7 @@ public final class PTableProtos {
 output.writeBytes(20, tenantId_);
   }
   if (((bitField0_ & 0x0001) == 0x0001)) {
-output.writeInt32(21, viewIndexId_);
+output.writeInt64(21, viewIndexId_);
   }
   if (((bitField0_ & 0x0002) == 0x0002)) {
 output.writeBytes(22, indexType_);
@@ -4946,6 +4978,9 @@ public 

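The protobuf change above is wire-compatible: viewIndexId keeps field number 21 but widens from int32 to int64 (both varint-encoded on the wire), and the new viewIndexType field declares a default so messages written before the field existed still decode sensibly. A stand-in sketch of the read-side pattern (the names are ours, not the generated API):

final class ViewIndexTypeCompatSketch {
    static final int DEFAULT_VIEW_INDEX_TYPE = 5; // matches [default = 5] in the proto

    // Stand-in for the generated hasX()/getX() accessor pair.
    static int viewIndexType(boolean hasField, int rawValue) {
        // Old writers never set the field, so hasField is false and readers
        // fall back to the declared default.
        return hasField ? rawValue : DEFAULT_VIEW_INDEX_TYPE;
    }

    public static void main(String[] args) {
        System.out.println(viewIndexType(false, 0)); // 5 (message from an old writer)
        System.out.println(viewIndexType(true, 4));  // 4 (message from a new writer)
    }
}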
[17/51] [abbrv] phoenix git commit: PHOENIX-4870 LoggingPhoenixConnection should log metrics when AutoCommit is set to True.

2018-10-17 Thread pboado
PHOENIX-4870 LoggingPhoenixConnection should log metrics when AutoCommit is set 
to True.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6ea2110b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6ea2110b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6ea2110b

Branch: refs/heads/4.x-cdh5.15
Commit: 6ea2110bacd97943b17dbbe4484bf8a6da9dde7a
Parents: bb297e7
Author: s.kadam 
Authored: Thu Sep 6 01:00:03 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 22:49:38 2018 +0100

--
 .../monitoring/PhoenixLoggingMetricsIT.java | 61 +++-
 .../phoenix/jdbc/LoggingPhoenixConnection.java  | 37 +++-
 .../jdbc/LoggingPhoenixPreparedStatement.java   | 25 +++-
 .../phoenix/jdbc/LoggingPhoenixStatement.java   | 28 +++--
 4 files changed, 125 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6ea2110b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 5d5524c..483d341 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -102,7 +102,8 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
     public void testPhoenixMetricsLoggedOnCommit() throws Exception {
         // run SELECT to verify read metrics are logged
         String query = "SELECT * FROM " + tableName1;
-        verifyQueryLevelMetricsLogging(query);
+        ResultSet rs = upsertRows(query);
+        verifyQueryLevelMetricsLogging(query, rs);
 
         // run UPSERT SELECT to verify mutation metrics are logged
         String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
@@ -140,7 +141,9 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
     public void testPhoenixMetricsLoggedOnClose() throws Exception {
         // run SELECT to verify read metrics are logged
         String query = "SELECT * FROM " + tableName1;
-        verifyQueryLevelMetricsLogging(query);
+
+        ResultSet rs = upsertRows(query);
+        verifyQueryLevelMetricsLogging(query, rs);
 
         // run UPSERT SELECT to verify mutation metrics are logged
         String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
@@ -164,13 +167,61 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
                 mutationReadMetricsMap.size() == 0);
     }
 
+    /**
+     * This test is added to verify if metrics are being logged in case
+     * auto commit is set to true.
+     */
+    @Test
+    public void testPhoenixMetricsLoggedOnAutoCommitTrue() throws Exception {
+        loggedConn.setAutoCommit(true);
+
+        String query = "SELECT * FROM " + tableName1;
+        ResultSet rs = upsertRows(query);
+        verifyQueryLevelMetricsLogging(query, rs);
+
+        // run UPSERT SELECT to verify mutation metrics are logged
+        String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
+        loggedConn.createStatement().executeUpdate(upsertSelect);
+
+        assertTrue("Mutation write metrics are not logged for " + tableName2,
+                mutationWriteMetricsMap.get(tableName2).size() > 0);
+        assertTrue("Mutation read metrics are not found for " + tableName1,
+                mutationReadMetricsMap.get(tableName1).size() > 0);
+
+        clearAllTestMetricMaps();
+
+        loggedConn.createStatement().execute(query);
+        assertTrue("Read metrics found for " + tableName1,
+                mutationReadMetricsMap.size() == 0);
+        loggedConn.createStatement().execute(upsertSelect);
+
+        assertTrue("Mutation write metrics are not logged for " + tableName2
+                + " in createStatement", mutationWriteMetricsMap.get(tableName2).size() > 0);
+        assertTrue("Mutation read metrics are not found for " + tableName1
+                + " in createStatement", mutationReadMetricsMap.get(tableName1).size() > 0);
+
+        clearAllTestMetricMaps();
+
+        loggedConn.prepareStatement(query).executeQuery();
+        assertTrue("Read metrics found for " + tableName1,
+                mutationReadMetricsMap.size() == 0);
+
+        loggedConn.prepareStatement(upsertSelect).executeUpdate();
+        assertTrue("Mutation write metrics are not logged for " + tableName2
+                + " in prepareStatement", mutationWriteMetricsMap.get(tableName2).size() > 0);
+

[phoenix] Git Push Summary

2018-10-17 Thread pboado
Repository: phoenix
Updated Branches:
  refs/heads/4.x-cdh5.11 [deleted] 88d384fa6


[13/13] phoenix git commit: PHOENIX-4790 Addendum to check that query is a point lookup for delete not to run query

2018-10-17 Thread pboado
PHOENIX-4790 Addendum to check that query is a point lookup for delete not to 
run query


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d1e82d19
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d1e82d19
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d1e82d19

Branch: refs/heads/4.x-cdh5.15
Commit: d1e82d19e4a16533372b70c7e20e0408859c100b
Parents: 8119508
Author: James Taylor 
Authored: Thu Jul 12 06:01:14 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:26:23 2018 +0100

--
 .../src/main/java/org/apache/phoenix/compile/DeleteCompiler.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1e82d19/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 78b2db9..a214c24 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -541,6 +541,9 @@ public class DeleteCompiler {
         Iterator<QueryPlan> iterator = queryPlans.iterator();
         while (iterator.hasNext()) {
             QueryPlan plan = iterator.next();
+            // Must be a point lookup in order to not run a query since
+            // we have to have the full key be enumerated.
+            noQueryReqd &= plan.getContext().getScanRanges().isPointLookup();
             if (plan.getTableRef().getTable().getIndexType() == IndexType.LOCAL) {
                 if (!plan.getContext().getDataColumns().isEmpty()) {
                     iterator.remove();

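The addendum tightens the gate on the no-query delete path: skipping the query is only safe when every candidate plan is a point lookup, i.e. the WHERE clause enumerates the full row key. A compressed sketch of that accumulation, with a hypothetical PlanView standing in for Phoenix's QueryPlan/ScanRanges:

final class PointLookupGateSketch {
    interface PlanView {
        boolean isPointLookup();
    }

    static boolean noQueryRequired(Iterable<PlanView> plans, boolean hasPreOrPostProcessing) {
        boolean noQueryReqd = !hasPreOrPostProcessing;
        for (PlanView plan : plans) {
            // Every plan must be a point lookup; otherwise the delete has to
            // actually run the query to discover the affected keys.
            noQueryReqd &= plan.isPointLookup();
        }
        return noQueryReqd;
    }
}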


[04/13] phoenix git commit: PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured

2018-10-17 Thread pboado
PHOENIX-4772 phoenix.sequence.saltBuckets is not honoured


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/351b37d9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/351b37d9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/351b37d9

Branch: refs/heads/4.x-cdh5.15
Commit: 351b37d90f6ead9d3e871072e77e1080229a4851
Parents: abda470
Author: Ankit Singhal 
Authored: Thu Jun 7 19:23:44 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:25:11 2018 +0100

--
 .../org/apache/phoenix/end2end/SequenceIT.java  |  57 +-
 .../phoenix/end2end/SequencePointInTimeIT.java  | 112 +++
 .../query/ConnectionQueryServicesImpl.java  |  13 ++-
 .../query/ConnectionlessQueryServicesImpl.java  |   7 +-
 .../apache/phoenix/query/QueryConstants.java|   2 +
 .../org/apache/phoenix/schema/Sequence.java |   6 +-
 6 files changed, 139 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/351b37d9/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
index 4cc9628..b76cc4e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
@@ -18,6 +18,8 @@
 
 package org.apache.phoenix.end2end;
 
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
 import static org.apache.phoenix.query.QueryServicesTestImpl.DEFAULT_SEQUENCE_CACHE_SIZE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
@@ -38,6 +40,7 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesTestImpl;
 import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.SequenceAlreadyExistsException;
 import org.apache.phoenix.schema.SequenceNotFoundException;
@@ -202,6 +205,8 @@ public class SequenceIT extends ParallelStatsDisabledIT {
         String schemaName = getSchemaName(sequenceName);
 
         conn.createStatement().execute("CREATE SEQUENCE " + sequenceName + " START WITH 2 INCREMENT BY 4");
+        int bucketNum = PhoenixRuntime.getTableNoCache(conn, SYSTEM_CATALOG_SCHEMA + "." + TYPE_SEQUENCE).getBucketNum();
+        assertEquals("Salt bucket for SYSTEM.SEQUENCE should be test default", bucketNum, QueryServicesTestImpl.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS);
         String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='" + sequenceNameWithoutSchema + "'";
         ResultSet rs = conn.prepareStatement(query).executeQuery();
         assertTrue(rs.next());
@@ -1406,56 +1411,4 @@ public class SequenceIT extends ParallelStatsDisabledIT {
         return tableName.substring(tableName.indexOf(".") + 1, tableName.length());
     }
 
-    @Test
-    public void testPointInTimeSequence() throws Exception {
-        String seqName = generateSequenceNameWithSchema();
-        Properties scnProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-        Connection beforeSeqConn = DriverManager.getConnection(getUrl(), scnProps);
-
-        ResultSet rs;
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.createStatement().execute("CREATE SEQUENCE " + seqName + "");
-
-        try {
-            beforeSeqConn.createStatement().executeQuery("SELECT next value for " + seqName);
-            fail();
-        } catch (SequenceNotFoundException e) {
-            beforeSeqConn.close();
-        }
-
-        scnProps.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(EnvironmentEdgeManager.currentTimeMillis()));
-        Connection afterSeqConn = DriverManager.getConnection(getUrl(), scnProps);
-
-        rs = conn.createStatement().executeQuery("SELECT next value for " + seqName);
-        assertTrue(rs.next());
-        assertEquals(1, rs.getInt(1));
-        rs = conn.createStatement().executeQuery("SELECT next value for " + seqName);
-        assertTrue(rs.next());
-        assertEquals(2, rs.getInt(1));
-
-

[03/13] phoenix git commit: PHOENIX-4774 Disable doclint in 1.8+ JDKs

2018-10-17 Thread pboado
PHOENIX-4774 Disable doclint in 1.8+ JDKs


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/abda4707
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/abda4707
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/abda4707

Branch: refs/heads/4.x-cdh5.15
Commit: abda47074fb9652ed0cbecd0535b8e43a683f425
Parents: 0718a87
Author: Alex Araujo 
Authored: Tue Jun 5 19:20:17 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:25:07 2018 +0100

--
 pom.xml | 13 +
 1 file changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/abda4707/pom.xml
--
diff --git a/pom.xml b/pom.xml
index a01f3c8..ddbeaf1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -451,6 +451,9 @@
   
   
 
+            <configuration>
+              <additionalparam>${javadoc.opts}</additionalparam>
+            </configuration>
   
 
   
@@ -1004,6 +1007,16 @@
   
 
   
+    <profile>
+      <id>java8-doclint-disabled</id>
+      <activation>
+        <jdk>[1.8,)</jdk>
+      </activation>
+      <properties>
+        <javadoc.opts>-Xdoclint:none</javadoc.opts>
+      </properties>
+    </profile>
 
 
   release



[09/13] phoenix git commit: PHOENIX-4790 Simplify check for client side delete

2018-10-17 Thread pboado
PHOENIX-4790 Simplify check for client side delete


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ee305562
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ee305562
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ee305562

Branch: refs/heads/4.x-cdh5.15
Commit: ee3055629fec9f0faf8b15e5f01238da2835308e
Parents: 3da21d5
Author: James Taylor 
Authored: Tue Jun 19 15:33:21 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:26:19 2018 +0100

--
 .../apache/phoenix/compile/DeleteCompiler.java  | 24 
 1 file changed, 5 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ee305562/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 5f9c76c..78b2db9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -46,7 +46,6 @@ import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
-import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -481,6 +480,7 @@ public class DeleteCompiler {
             projectedColumns.add(column);
             aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
         }
+        boolean noQueryReqd = true;
         // Project all non PK indexed columns so that we can do the proper index maintenance
         for (PTable index : table.getIndexes()) {
             IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
@@ -492,6 +492,8 @@ public class DeleteCompiler {
                 boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty();
                 PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
                 if(!projectedColumns.contains(column)) {
+                    // We must run a query if any index contains a non pk column
+                    noQueryReqd = false;
                     projectedColumns.add(column);
                     aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null)));
                 }
@@ -511,7 +513,7 @@ public class DeleteCompiler {
             select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
         }
         final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing;
-        boolean noQueryReqd = !hasPreOrPostProcessing;
+        noQueryReqd &= !hasPreOrPostProcessing;
         // No limit and no sub queries, joins, etc in where clause
         // Can't run on same server for transactional data, as we need the row keys for the data
         // that is being upserted for conflict detection purposes.
@@ -550,24 +552,8 @@ public class DeleteCompiler {
         }
 
         runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX;
-
-        // We need to have all indexed columns available in all immutable indexes in order
-        // to generate the delete markers from the query. We also cannot have any filters
-        // except for our SkipScanFilter for point lookups.
-        // A simple check of the non existence of a where clause in the parse node is not sufficient, as the where clause
-        // may have been optimized out. Instead, we check that there's a single SkipScanFilter
-        // If we can generate a plan for every index, that means all the required columns are available in every index,
-        // hence we can drive the delete from any of the plans.
-        noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
-        int queryPlanIndex = 0;
-        while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
-            QueryPlan plan = queryPlans.get(queryPlanIndex++);
-            StatementContext context = plan.getContext();
-            noQueryReqd &= (!context.getScan().hasFilter()
-                    || context.getScan().getFilter() instanceof SkipScanFilter)
-
-  

[06/13] phoenix git commit: PHOENIX-4776 Remove creation of .md5 files from dev/make_rc.sh

2018-10-17 Thread pboado
PHOENIX-4776 Remove creation of .md5 files from dev/make_rc.sh


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2c008068
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2c008068
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2c008068

Branch: refs/heads/4.x-cdh5.15
Commit: 2c008068e4768dfb2dc1fbd907d1072ae507769e
Parents: 39a7785
Author: Pedro Boado 
Authored: Mon Jun 11 23:35:56 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:25:37 2018 +0100

--
 dev/make_rc.sh | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2c008068/dev/make_rc.sh
--
diff --git a/dev/make_rc.sh b/dev/make_rc.sh
index 9bff7d1..963a9d8 100755
--- a/dev/make_rc.sh
+++ b/dev/make_rc.sh
@@ -123,13 +123,11 @@ function_sign() {
   # if on MAC OS
   if [[ "$OSTYPE" == "darwin"* ]]; then
 gpg2 --armor --output $file.asc --detach-sig $file;
-openssl md5 $file > $file.md5;
 openssl dgst -sha512 $file > $file.sha512;
 openssl dgst -sha256 $file >> $file.sha256;
   # all other OS
   else
 gpg --armor --output $file.asc --detach-sig $file;
-md5sum -b $file > $file.md5;
 sha512sum -b $file > $file.sha512;
 sha256sum -b $file >> $file.sha256;
   fi



[10/13] phoenix git commit: PHOENIX-3383 Comparison between descending row keys used in RVC is reverse

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81195083/phoenix-core/src/main/java/org/apache/phoenix/execute/PhoenixTxIndexMutationGenerator.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/PhoenixTxIndexMutationGenerator.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/PhoenixTxIndexMutationGenerator.java
index a7b5687..877c939 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/PhoenixTxIndexMutationGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/PhoenixTxIndexMutationGenerator.java
@@ -177,7 +177,7 @@ public class PhoenixTxIndexMutationGenerator {
 
         // Project empty key value column
         scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
-        ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
+        ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, null, true, -1);
         scanRanges.initializeScan(scan);
         Table txTable = indexMetaData.getTransactionContext().getTransactionalTable(htable, isImmutable);
         // For rollback, we need to see all versions, including

http://git-wip-us.apache.org/repos/asf/phoenix/blob/81195083/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FunctionExpression.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FunctionExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FunctionExpression.java
index b45706a..bc9fa9f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FunctionExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/FunctionExpression.java
@@ -30,7 +30,15 @@ import org.apache.phoenix.expression.Expression;
  * @since 0.1
  */
 public abstract class FunctionExpression extends BaseCompoundExpression {
-    public enum OrderPreserving {NO, YES_IF_LAST, YES};
+    public enum OrderPreserving {NO, YES_IF_LAST, YES;
+
+        public OrderPreserving combine(OrderPreserving that) {
+            if (that == null) {
+                return this;
+            }
+            return OrderPreserving.values()[Math.min(this.ordinal(), that.ordinal())];
+        }};
+
     public FunctionExpression() {
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/81195083/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InvertFunction.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InvertFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InvertFunction.java
index 3615cbe..8ef5914 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InvertFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/InvertFunction.java
@@ -96,7 +96,24 @@ public class InvertFunction extends ScalarFunction {
     @Override
     public KeyRange getKeyRange(CompareOp op, Expression rhs) {
         KeyRange range = childPart.getKeyRange(op, rhs);
-        return range.invert();
+        byte[] lower = range.getLowerRange();
+        if (!range.lowerUnbound()) {
+            lower = SortOrder.invert(lower, 0, lower.length);
+        }
+        byte[] upper;
+        if (range.isSingleKey()) {
+            upper = lower;
+        } else {
+            upper = range.getUpperRange();
+            if (!range.upperUnbound()) {
+                upper = SortOrder.invert(upper, 0, upper.length);
+            }
+        }
+        range = KeyRange.getKeyRange(lower, range.isLowerInclusive(), upper, range.isUpperInclusive());
+        if (getColumn().getSortOrder() == SortOrder.DESC) {
+            range = range.invert();
+        }
+        return range;
     }
 
     @Override

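For a DESC row-key column Phoenix stores the bit-inverted bytes, so the rewritten getKeyRange first inverts each bound byte-wise and then, when the column sorts descending, swaps the endpoints. A small illustration of why the inversion flips comparisons (the invert helper here is ours, not Phoenix's SortOrder):

final class DescKeyRangeSketch {
    // Bit-invert a copy of the bytes, mimicking descending-order storage.
    static byte[] invert(byte[] b) {
        byte[] out = new byte[b.length];
        for (int i = 0; i < b.length; i++) {
            out[i] = (byte) ~b[i];
        }
        return out;
    }

    public static void main(String[] args) {
        // 'a' < 'b' in user space, but their inverted encodings compare the
        // other way round; hence the bound inversion plus endpoint swap in
        // the patch above.
        byte[] a = invert(new byte[] { 'a' });
        byte[] b = invert(new byte[] { 'b' });
        System.out.println((a[0] & 0xff) > (b[0] & 0xff)); // prints true
    }
}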
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81195083/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PrefixFunction.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PrefixFunction.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PrefixFunction.java
index cb98e28..ff3e74d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/PrefixFunction.java
+++ 

[11/13] phoenix git commit: PHOENIX-3383 Comparison between descending row keys used in RVC is reverse

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/81195083/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index 87f00e4..a5287cb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -17,8 +17,6 @@
  */
 package org.apache.phoenix.compile;
 
-import static java.util.Collections.singletonList;
-
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -27,10 +25,14 @@ import java.util.Comparator;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.expression.AndExpression;
 import org.apache.phoenix.expression.BaseExpression;
 import 
org.apache.phoenix.expression.BaseExpression.ExpressionComparabilityWrapper;
@@ -61,7 +63,6 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.schema.ValueSchema.Field;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PDataType;
@@ -74,8 +75,11 @@ import org.apache.phoenix.util.SchemaUtil;
 
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
+import edu.umd.cs.findbugs.annotations.NonNull;
+
 
 /**
  *
@@ -115,6 +119,7 @@ public class WhereOptimizer {
RowKeySchema schema = table.getRowKeySchema();
boolean isMultiTenant = tenantId != null && table.isMultiTenant();
boolean isSharedIndex = table.getViewIndexId() != null;
+   ImmutableBytesWritable ptr = context.getTempPtr();

if (isMultiTenant) {
 tenantIdBytes = ScanUtil.getTenantIdBytes(schema, isSalted, 
tenantId, isSharedIndex);
@@ -158,31 +163,13 @@ public class WhereOptimizer {
 
 int pkPos = 0;
 int nPKColumns = table.getPKColumns().size();
-int[] slotSpan = new int[nPKColumns];
+int[] slotSpanArray = new int[nPKColumns];
 List> cnf = 
Lists.newArrayListWithExpectedSize(schema.getMaxFields());
-KeyRange minMaxRange = keySlots.getMinMaxRange();
-if (minMaxRange == null) {
-minMaxRange = KeyRange.EVERYTHING_RANGE;
-}
-boolean hasMinMaxRange = (minMaxRange != KeyRange.EVERYTHING_RANGE);
-int minMaxRangeOffset = 0;
-byte[] minMaxRangePrefix = null;
 boolean hasViewIndex = table.getViewIndexId() != null;
-if (hasMinMaxRange) {
-int minMaxRangeSize = (isSalted ? SaltingUtil.NUM_SALTING_BYTES : 
0)
-+ (isMultiTenant ? tenantIdBytes.length + 1 : 0)
-+ (hasViewIndex ? 
MetaDataUtil.getViewIndexIdDataType().getByteSize() : 0);
-minMaxRangePrefix = new byte[minMaxRangeSize];
-}
-
-Iterator iterator = keySlots.iterator();
+Iterator iterator = 
keySlots.getSlots().iterator();
 // Add placeholder for salt byte ranges
 if (isSalted) {
 cnf.add(SALT_PLACEHOLDER);
-if (hasMinMaxRange) {
-   System.arraycopy(SALT_PLACEHOLDER.get(0).getLowerRange(), 
0, minMaxRangePrefix, minMaxRangeOffset, SaltingUtil.NUM_SALTING_BYTES);
-   minMaxRangeOffset += SaltingUtil.NUM_SALTING_BYTES;
-}
 // Increment the pkPos, as the salt column is in the row schema
 // Do not increment the iterator, though, as there will never be
 // an expression in the keySlots for the salt column
@@ -194,35 +181,17 @@ public class WhereOptimizer {
 if (hasViewIndex) {
 byte[] viewIndexBytes = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
 KeyRange indexIdKeyRange = KeyRange.getKeyRange(viewIndexBytes);
-cnf.add(singletonList(indexIdKeyRange));
-if (hasMinMaxRange) {
-    System.arraycopy(viewIndexBytes, 0, minMaxRangePrefix, minMaxRangeOffset, viewIndexBytes.length);
-    minMaxRangeOffset += viewIndexBytes.length;
-}
+cnf.add(Collections.singletonList(indexIdKeyRange));
 pkPos++;
 }
 
 // Add tenant data isolation for 

[01/13] phoenix git commit: PHOENIX-4758 Validate that HADOOP_CONF_DIR is not set for HiveMRIT

2018-10-17 Thread pboado
Repository: phoenix
Updated Branches:
  refs/heads/4.x-cdh5.15 1f7b56192 -> d1e82d19e


PHOENIX-4758 Validate that HADOOP_CONF_DIR is not set for HiveMRIT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4f0929fb
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4f0929fb
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4f0929fb

Branch: refs/heads/4.x-cdh5.15
Commit: 4f0929fb9d8dfd6c57a52d548e01d736c9ed0c4e
Parents: 1f7b561
Author: Josh Elser 
Authored: Tue May 29 19:14:04 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:25:01 2018 +0100

--
 .../src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f0929fb/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
--
diff --git 
a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java 
b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
index 644ff24..4bc5a7d 100644
--- a/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
+++ b/phoenix-hive/src/it/java/org/apache/phoenix/hive/HiveMapReduceIT.java
@@ -18,6 +18,10 @@
 
 package org.apache.phoenix.hive;
 
+import static org.junit.Assert.fail;
+
+import java.util.Map;
+
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
 import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;
@@ -28,6 +32,10 @@ public class HiveMapReduceIT extends HivePhoenixStoreIT {
 
 @BeforeClass
 public static void setUpBeforeClass() throws Exception {
+final String hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
+if (hadoopConfDir != null && hadoopConfDir.length() != 0) {
+fail("HADOOP_CONF_DIR is non-empty in the current shell 
environment which will very likely cause this test to fail.");
+}
 setup(HiveTestUtil.MiniClusterType.mr);
 }
 }
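
For reference, the guard above is easy to reproduce outside the test harness. A minimal, self-contained sketch of the same fail-fast check (the class name and message are invented for illustration; only the System.getenv("HADOOP_CONF_DIR") lookup comes from the patch):

public class EnvGuard {
    public static void main(String[] args) {
        // Fail fast if HADOOP_CONF_DIR leaks into the test environment,
        // since it would point the mini cluster at an external Hadoop config.
        String hadoopConfDir = System.getenv("HADOOP_CONF_DIR");
        if (hadoopConfDir != null && !hadoopConfDir.isEmpty()) {
            throw new IllegalStateException("HADOOP_CONF_DIR is set: " + hadoopConfDir);
        }
        System.out.println("Environment looks clean.");
    }
}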



[02/13] phoenix git commit: PHOENIX-4773 Move HTable rollback wrapper into Tephra TAL method

2018-10-17 Thread pboado
PHOENIX-4773 Move HTable rollback wrapper into Tephra TAL method


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0718a87b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0718a87b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0718a87b

Branch: refs/heads/4.x-cdh5.15
Commit: 0718a87b7f6e9cf1c65fc28141c276ca2713a499
Parents: 4f0929f
Author: James Taylor 
Authored: Tue Jun 5 04:27:36 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:25:04 2018 +0100

--
 .../apache/phoenix/cache/ServerCacheClient.java |  21 +-
 .../apache/phoenix/execute/HashJoinPlan.java|   7 +-
 .../apache/phoenix/execute/MutationState.java   | 190 ++-
 .../PhoenixTxIndexMutationGenerator.java|  42 
 .../phoenix/index/IndexMetaDataCacheClient.java |  67 ++-
 .../apache/phoenix/join/HashCacheClient.java|   5 +-
 .../transaction/OmidTransactionContext.java |   3 +-
 .../transaction/PhoenixTransactionContext.java  |   5 +-
 .../transaction/TephraTransactionContext.java   |  91 -
 .../java/org/apache/phoenix/util/IndexUtil.java |   8 +
 10 files changed, 230 insertions(+), 209 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0718a87b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index 68de747..5e284bd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -70,7 +70,6 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
-import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.Closeables;
 import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
@@ -90,7 +89,7 @@ public class ServerCacheClient {
 private static final Random RANDOM = new Random();
public static final String HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER = 
"hash.join.server.cache.resend.per.server";
 private final PhoenixConnection connection;
-private final Map<Integer, TableRef> cacheUsingTableRefMap = new ConcurrentHashMap<Integer, TableRef>();
+private final Map<Integer, PTable> cacheUsingTableMap = new ConcurrentHashMap<Integer, PTable>();
 
 /**
  * Construct client used to create a serialized cached snapshot of a table 
and send it to each region server
@@ -220,12 +219,12 @@ public class ServerCacheClient {
 }
 
 public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState,
-        final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef) throws SQLException {
-    return addServerCache(keyRanges, cachePtr, txState, cacheFactory, cacheUsingTableRef, false);
+        final ServerCacheFactory cacheFactory, final PTable cacheUsingTable) throws SQLException {
+    return addServerCache(keyRanges, cachePtr, txState, cacheFactory, cacheUsingTable, false);
 }
 
 public ServerCache addServerCache(ScanRanges keyRanges, final ImmutableBytesWritable cachePtr, final byte[] txState,
-        final ServerCacheFactory cacheFactory, final TableRef cacheUsingTableRef, boolean storeCacheOnClient)
+        final ServerCacheFactory cacheFactory, final PTable cacheUsingTable, boolean storeCacheOnClient)
         throws SQLException {
 ConnectionQueryServices services = connection.getQueryServices();
 List<Closeable> closeables = new ArrayList<Closeable>();
@@ -241,7 +240,6 @@ public class ServerCacheClient {
 ExecutorService executor = services.getExecutor();
 List<Future<Boolean>> futures = Collections.emptyList();
 try {
-final PTable cacheUsingTable = cacheUsingTableRef.getTable();
 List<HRegionLocation> locations = services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
 int nRegions = locations.size();
 // Size these based on worst case
@@ -258,7 +256,7 @@ public class ServerCacheClient {
 servers.add(entry);
 if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));}
 final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
-final HTableInterface htable = services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
+final HTableInterface htable = 

[05/13] phoenix git commit: PHOENIX-4544 Update statistics inconsistent behavior

2018-10-17 Thread pboado
PHOENIX-4544 Update statistics inconsistent behavior


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/39a77859
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/39a77859
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/39a77859

Branch: refs/heads/4.x-cdh5.15
Commit: 39a77859087ba2c1b4f9a1f940e30a0b9e330746
Parents: 351b37d
Author: Ankit Singhal 
Authored: Thu Jun 7 19:23:56 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:25:14 2018 +0100

--
 .../StatisticsCollectionRunTrackerIT.java   | 32 --
 .../UngroupedAggregateRegionObserver.java   |  4 +-
 .../apache/phoenix/schema/MetaDataClient.java   | 10 -
 .../stats/StatisticsCollectionRunTracker.java   | 45 +---
 .../java/org/apache/phoenix/util/ByteUtil.java  | 16 ++-
 5 files changed, 85 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/39a77859/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
index cf475f9..a643383 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/coprocessor/StatisticsCollectionRunTrackerIT.java
@@ -25,6 +25,8 @@ import static org.junit.Assert.assertTrue;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -61,12 +63,15 @@ public class StatisticsCollectionRunTrackerIT extends ParallelStatsEnabledIT {
 StatisticsCollectionRunTracker tracker =
         StatisticsCollectionRunTracker.getInstance(new Configuration());
 // assert that the region wasn't added to the tracker
-assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertTrue(tracker.addUpdateStatsCommandRegion(regionInfo, new HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 // assert that removing the region from the tracker works
-assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertTrue(tracker.removeUpdateStatsCommandRegion(regionInfo, new HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 runUpdateStats(tableName);
 // assert that after update stats is complete, tracker isn't tracking the region any more
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new HashSet<byte[]>(Arrays.asList(Bytes.toBytes("0")))));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, new HashSet<byte[]>(Arrays.asList(Bytes.toBytes("L#0")))));
 }
 
 @Test
@@ -103,25 +108,27 @@ public class StatisticsCollectionRunTrackerIT extends 
ParallelStatsEnabledIT {
 HRegionInfo regionInfo = createTableAndGetRegion(tableName);
 // simulate stats collection via major compaction by marking the 
region as compacting in the tracker
 markRegionAsCompacting(regionInfo);
-Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT, runUpdateStats(tableName));
-StatisticsCollectionRunTracker tracker =
+// there will be no update for local index and a table , so checking 2 
* COMPACTION_UPDATE_STATS_ROW_COUNT
+Assert.assertEquals("Row count didn't match", 
COMPACTION_UPDATE_STATS_ROW_COUNT * 2, runUpdateStats(tableName));
 StatisticsCollectionRunTracker.getInstance(new 
Configuration());
 // assert that the tracker state was cleared.
-assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo));
+HashSet familyMap = new 
HashSet(Arrays.asList(Bytes.toBytes("0")));
+assertFalse(tracker.removeUpdateStatsCommandRegion(regionInfo, 
familyMap));
 }
 
 @Test
 public void testUpdateStatsPreventsAnotherUpdateStatsFromRunning() throws Exception {
 String tableName = fullTableName;
 HRegionInfo regionInfo = createTableAndGetRegion(tableName);
-markRunningUpdateStats(regionInfo);
-Assert.assertEquals("Row count didn't match", CONCURRENT_UPDATE_STATS_ROW_COUNT,
-

[07/13] phoenix git commit: PHOENIX-4789 Exception when setting TTL on Tephra transactional table

2018-10-17 Thread pboado
PHOENIX-4789 Exception when setting TTL on Tephra transactional table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d50f3e34
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d50f3e34
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d50f3e34

Branch: refs/heads/4.x-cdh5.15
Commit: d50f3e343a2212d655b5e45f99f00a6a551bb396
Parents: 2c00806
Author: James Taylor 
Authored: Mon Jun 18 14:00:02 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:26:15 2018 +0100

--
 .../org/apache/phoenix/tx/TransactionIT.java| 30 
 .../query/ConnectionQueryServicesImpl.java  |  3 ++
 2 files changed, 33 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d50f3e34/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index c0ec6b8..12c3b7a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
 import java.sql.DriverManager;
@@ -38,6 +39,9 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.exception.SQLExceptionCode;
@@ -54,6 +58,7 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
+import org.apache.tephra.TxConstants;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -370,4 +375,29 @@ public class TransactionIT  extends ParallelStatsDisabledIT {
 conn.close();
 }
 }
+
+private static void assertTTL(Admin admin, String tableName, int ttl) throws TableNotFoundException, IOException {
+    HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(tableName));
+    for (HColumnDescriptor colDesc : tableDesc.getFamilies()) {
+        assertEquals(ttl, Integer.parseInt(colDesc.getValue(TxConstants.PROPERTY_TTL)));
+        assertEquals(HColumnDescriptor.DEFAULT_TTL, colDesc.getTimeToLive());
+    }
+}
+
+@Test
+public void testSetTTL() throws Exception {
+    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+    TransactionFactory.Provider txProvider = TransactionFactory.Provider.valueOf(this.txProvider);
+    try (Connection conn = DriverManager.getConnection(getUrl(), props); Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
+        String tableName = generateUniqueName();
+        conn.createStatement().execute("CREATE TABLE " + tableName +
+            "(K VARCHAR PRIMARY KEY) TRANSACTIONAL=true,TRANSACTION_PROVIDER='" + txProvider + "',TTL=100");
+        assertTTL(admin, tableName, 100);
+        tableName = generateUniqueName();
+        conn.createStatement().execute("CREATE TABLE " + tableName +
+            "(K VARCHAR PRIMARY KEY) TRANSACTIONAL=true,TRANSACTION_PROVIDER='" + txProvider + "'");
+        conn.createStatement().execute("ALTER TABLE " + tableName + " SET TTL=" + 200);
+        assertTTL(admin, tableName, 200);
+}
+}
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d50f3e34/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index bfaffbd..f8ca7a4 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2234,6 +2234,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 Map<String, Object> props = entry.getValue();
 if (props == null) {
  

[08/13] phoenix git commit: PHOENIX-4785 Unable to write to table if index is made active during retry

2018-10-17 Thread pboado
PHOENIX-4785 Unable to write to table if index is made active during retry


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3da21d5e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3da21d5e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3da21d5e

Branch: refs/heads/4.x-cdh5.15
Commit: 3da21d5e45b0aceb683eb01867bbe3a8fef0abb1
Parents: d50f3e3
Author: Ankit Singhal 
Authored: Fri Jun 22 00:13:50 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:26:17 2018 +0100

--
 .../end2end/index/MutableIndexFailureIT.java| 128 ++-
 .../MutableIndexFailureWithNamespaceIT.java |  80 
 .../coprocessor/MetaDataEndpointImpl.java   |  30 +
 .../index/PhoenixIndexFailurePolicy.java|  71 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   2 +-
 5 files changed, 276 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3da21d5e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 715e37f..aac20ac 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -28,10 +28,16 @@ import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -104,10 +110,10 @@ public class MutableIndexFailureIT extends BaseTest {
 private final boolean throwIndexWriteFailure;
 private String schema = generateUniqueName();
 private List exceptions = Lists.newArrayList();
-private static RegionCoprocessorEnvironment indexRebuildTaskRegionEnvironment;
-private static final int forwardOverlapMs = 1000;
-private static final int disableTimestampThresholdMs = 1;
-private static final int numRpcRetries = 2;
+protected static RegionCoprocessorEnvironment indexRebuildTaskRegionEnvironment;
+protected static final int forwardOverlapMs = 1000;
+protected static final int disableTimestampThresholdMs = 1;
+protected static final int numRpcRetries = 2;
 
 public MutableIndexFailureIT(boolean transactional, boolean localIndex, boolean isNamespaceMapped, Boolean disableIndexOnWriteFailure, boolean failRebuildTask, Boolean throwIndexWriteFailure) {
 this.transactional = transactional;
@@ -127,6 +133,23 @@ public class MutableIndexFailureIT extends BaseTest {
 
 @BeforeClass
 public static void doSetup() throws Exception {
+Map<String, String> serverProps = getServerProps();
+Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
+clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
+NUM_SLAVES_BASE = 4;
+setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
+indexRebuildTaskRegionEnvironment =
+    (RegionCoprocessorEnvironment) getUtility()
+        .getRSForFirstRegionInTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+        .getOnlineRegions(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)
+        .get(0).getCoprocessorHost()
+        .findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
+MetaDataRegionObserver.initRebuildIndexConnectionProps(
+    indexRebuildTaskRegionEnvironment.getConfiguration());
+}
+
+protected static Map<String, String> getServerProps(){
 Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(10);
 serverProps.put("hbase.coprocessor.region.classes", FailingRegionObserver.class.getName());
 serverProps.put(HConstants.HBASE_RPC_TIMEOUT_KEY, "1");
@@ -142,19 +165,7 @@ public class MutableIndexFailureIT extends BaseTest {
  * because we want to control it's execution ourselves
  */
 serverProps.put(QueryServices.INDEX_REBUILD_TASK_INITIAL_DELAY, Long.toString(Long.MAX_VALUE));
-Map clientProps = 

[12/13] phoenix git commit: PHOENIX-3383 Comparison between descending row keys used in RVC is reverse

2018-10-17 Thread pboado
PHOENIX-3383 Comparison between descending row keys used in RVC is reverse


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/81195083
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/81195083
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/81195083

Branch: refs/heads/4.x-cdh5.15
Commit: 811950835e301a43a5e22ebc1e0c1b3656473285
Parents: ee30556
Author: James Taylor 
Authored: Fri Jul 6 05:38:28 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:26:21 2018 +0100

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java |   12 +-
 .../org/apache/phoenix/compile/KeyPart.java |2 -
 .../org/apache/phoenix/compile/ScanRanges.java  |   77 +-
 .../apache/phoenix/compile/WhereOptimizer.java  | 1304 +-
 .../PhoenixTxIndexMutationGenerator.java|2 +-
 .../expression/function/FunctionExpression.java |   10 +-
 .../expression/function/InvertFunction.java |   19 +-
 .../expression/function/PrefixFunction.java |6 +-
 .../expression/function/RTrimFunction.java  |6 +-
 .../function/RoundDateExpression.java   |   22 +-
 .../function/RoundDecimalExpression.java|7 +-
 .../phoenix/iterate/BaseResultIterators.java|4 +-
 .../apache/phoenix/iterate/ExplainTable.java|   10 -
 .../java/org/apache/phoenix/query/KeyRange.java |   28 +-
 .../org/apache/phoenix/schema/RowKeySchema.java |   78 ++
 .../phoenix/compile/QueryCompilerTest.java  |2 +-
 .../phoenix/compile/QueryOptimizerTest.java |5 +-
 .../TenantSpecificViewIndexCompileTest.java |8 +-
 .../phoenix/compile/WhereOptimizerTest.java |  359 -
 .../RoundFloorCeilExpressionsTest.java  |   59 +-
 .../apache/phoenix/query/KeyRangeClipTest.java  |2 +-
 .../org/apache/phoenix/query/QueryPlanTest.java |8 +-
 .../apache/phoenix/schema/RowKeySchemaTest.java |   48 +
 23 files changed, 1567 insertions(+), 511 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/81195083/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 9109c12..04272fa 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -372,9 +372,6 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 }
 }
 
-// FIXME: this repros PHOENIX-3382, but turned up two more issues:
-// 1) PHOENIX-3383 Comparison between descending row keys used in RVC is reverse
-// 2) PHOENIX-3384 Optimize RVC expressions for non leading row key columns
 @Test
 public void testRVCOnDescWithLeadingPKEquality() throws Exception {
 final Connection conn = DriverManager.getConnection(getUrl());
@@ -398,14 +395,11 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 conn.createStatement().execute("UPSERT INTO " + fullTableName + " 
VALUES ('org1',1,'02')");
 conn.commit();
 
-// FIXME: PHOENIX-3383
-// This comparison is really backwards: it should be (score, entity_id) < (2, '04'),
-// but because we're matching a descending key, our comparison has to be switched.
 try (Statement stmt = conn.createStatement()) {
     final ResultSet rs = stmt.executeQuery("SELECT entity_id, score\n" +
         "FROM " + fullTableName + "\n" +
         "WHERE organization_id = 'org1'\n" +
-        "AND (score, entity_id) > (2, '04')\n" +
+        "AND (score, entity_id) < (2, '04')\n" +
         "ORDER BY score DESC, entity_id DESC\n" +
         "LIMIT 3");
 assertTrue(rs.next());
@@ -416,13 +410,11 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 assertEquals(1.0, rs.getDouble(2), 0.001);
 assertFalse(rs.next());
 }
-// FIXME: PHOENIX-3384
-// It should not be necessary to specify organization_id in this query
 try (Statement stmt = conn.createStatement()) {
     final ResultSet rs = stmt.executeQuery("SELECT entity_id, score\n" +
         "FROM " + fullTableName + "\n" +
         "WHERE organization_id = 'org1'\n" +
-        "AND (organization_id, score, entity_id) > ('org1', 2, '04')\n" +
+        "AND (organization_id, score, entity_id) < ('org1', 2, '04')\n" +
         "ORDER BY score DESC, entity_id DESC\n" +
         "LIMIT 3");
 assertTrue(rs.next());
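
The FIXME comments removed above are the crux of PHOENIX-3383: a column declared DESC is stored with its value bytes inverted, so a raw unsigned byte comparison of the row key runs opposite to the logical order, and before this fix an RVC against DESC key columns had to be written backwards. A self-contained sketch of that inversion (illustrative only; invert() is a stand-in for Phoenix's internal DESC encoding, not its actual API):

public class DescOrderDemo {
    // Stand-in for the bitwise inversion applied to DESC-sorted key columns.
    static byte[] invert(byte[] value) {
        byte[] out = new byte[value.length];
        for (int i = 0; i < value.length; i++) {
            out[i] = (byte) ~value[i];
        }
        return out;
    }

    // Unsigned lexicographic comparison, as used for HBase row keys.
    static int compareUnsigned(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
            int cmp = (a[i] & 0xff) - (b[i] & 0xff);
            if (cmp != 0) return cmp;
        }
        return a.length - b.length;
    }

    public static void main(String[] args) {
        byte[] two = {2};
        byte[] four = {4};
        // ASC storage: 2 sorts before 4, matching the logical order.
        System.out.println(compareUnsigned(two, four) < 0);                  // true
        // DESC storage: the inverted bytes sort in the opposite order, so the
        // engine (after PHOENIX-3383) flips the comparison instead of the user.
        System.out.println(compareUnsigned(invert(two), invert(four)) > 0);  // true
    }
}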


[phoenix] Git Push Summary

2018-10-17 Thread pboado
Repository: phoenix
Updated Branches:
  refs/heads/4.x-cdh5.12 [deleted] 5356476ae


[phoenix] Git Push Summary

2018-10-17 Thread pboado
Repository: phoenix
Updated Branches:
  refs/heads/4.x-cdh5.14 [deleted] fd9342328


[phoenix] Git Push Summary

2018-10-17 Thread pboado
Repository: phoenix
Updated Branches:
  refs/heads/4.x-cdh5.13 [deleted] b8127085d


phoenix git commit: Changes for CDH 5.15.x

2018-10-17 Thread pboado
Repository: phoenix
Updated Branches:
  refs/heads/4.x-cdh5.15 [created] 1f7b56192


Changes for CDH 5.15.x


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1f7b5619
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1f7b5619
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1f7b5619

Branch: refs/heads/4.x-cdh5.15
Commit: 1f7b56192d1f701d65d7b14c825e065173747c85
Parents: 0a06e59
Author: Pedro Boado 
Authored: Wed Oct 17 21:19:38 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 21:19:38 2018 +0100

--
 phoenix-assembly/pom.xml|  2 +-
 phoenix-client/pom.xml  |  2 +-
 phoenix-core/pom.xml|  2 +-
 .../hadoop/hbase/ipc/PhoenixRpcScheduler.java   | 34 ++--
 .../apache/phoenix/execute/DelegateHTable.java  | 19 +++
 .../transaction/TephraTransactionContext.java   | 21 
 phoenix-flume/pom.xml   |  2 +-
 phoenix-hive/pom.xml|  2 +-
 phoenix-kafka/pom.xml   |  2 +-
 phoenix-load-balancer/pom.xml   |  2 +-
 phoenix-parcel/pom.xml  |  4 +--
 phoenix-pherf/pom.xml   |  2 +-
 phoenix-pig/pom.xml |  2 +-
 phoenix-queryserver-client/pom.xml  |  2 +-
 phoenix-queryserver/pom.xml |  2 +-
 phoenix-server/pom.xml  |  2 +-
 phoenix-spark/pom.xml   |  2 +-
 phoenix-tracing-webapp/pom.xml  |  2 +-
 pom.xml |  4 +--
 19 files changed, 90 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f7b5619/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 7d98c25..bce2089 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.0-cdh5.11.2
+4.15.0-cdh5.15.1
   
   phoenix-assembly
   Phoenix Assembly

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f7b5619/phoenix-client/pom.xml
--
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index fe5836b..83c7ad9 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.0-cdh5.11.2
+4.15.0-cdh5.15.1
   
   phoenix-client
   Phoenix Client

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f7b5619/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 8b09c67..c20c89c 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.0-cdh5.11.2
+4.15.0-cdh5.15.1
   
   phoenix-core
   Phoenix Core

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f7b5619/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
index 4fdddf5..d1f05f8 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcScheduler.java
@@ -124,6 +124,36 @@ public class PhoenixRpcScheduler extends RpcScheduler {
 public void setMetadataExecutorForTesting(RpcExecutor executor) {
 this.metadataCallExecutor = executor;
 }
-
-
+
+@Override
+public int getReadQueueLength() {
+return delegate.getReadQueueLength();
+}
+
+@Override
+public int getWriteQueueLength() {
+return delegate.getWriteQueueLength();
+}
+
+@Override
+public int getScanQueueLength() {
+return delegate.getScanQueueLength();
+}
+
+@Override
+public int getActiveReadRpcHandlerCount() {
+return delegate.getActiveReadRpcHandlerCount();
+}
+
+@Override
+public int getActiveWriteRpcHandlerCount() {
+return delegate.getActiveWriteRpcHandlerCount();
+}
+
+@Override
+public int getActiveScanRpcHandlerCount() {
+return delegate.getActiveScanRpcHandlerCount();
+}
+
+
 }
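
The new methods above are pure pass-throughs: PhoenixRpcScheduler wraps another HBase RpcScheduler and must forward the queue and handler metrics that the newer HBase/CDH release declares as abstract. A minimal sketch of the delegation pattern, with hypothetical names:

// Hypothetical minimal example of the delegate pattern used above.
interface Scheduler {
    int getReadQueueLength();
}

class DelegatingScheduler implements Scheduler {
    private final Scheduler delegate;

    DelegatingScheduler(Scheduler delegate) {
        this.delegate = delegate;
    }

    @Override
    public int getReadQueueLength() {
        // Pure pass-through: this wrapper adds no behavior of its own here.
        return delegate.getReadQueueLength();
    }
}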

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f7b5619/phoenix-core/src/main/java/org/apache/phoenix/execute/DelegateHTable.java

[20/32] phoenix git commit: PHOENIX-4818 Fix RAT check for missing licenses

2018-10-17 Thread pboado
PHOENIX-4818 Fix RAT check for missing licenses


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/694d0d56
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/694d0d56
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/694d0d56

Branch: refs/heads/4.14-cdh5.14
Commit: 694d0d566461d26d7fb1168e63d745c543e1f309
Parents: 3420a81
Author: Vincent Poon 
Authored: Sun Jul 22 22:19:15 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:42:29 2018 +0100

--
 .../apache/phoenix/query/ConnectionCachingIT.java  | 17 +
 1 file changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/694d0d56/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
index b2ef052..d1dda04 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.query;
 
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;



[17/32] phoenix git commit: Revert "PHOENIX-4790 Simplify check for client side delete"

2018-10-17 Thread pboado
Revert "PHOENIX-4790 Simplify check for client side delete"

This reverts commit 5cc9a25a185e596a39e4f2916f90b4c576f4f82f.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ab177a7c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ab177a7c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ab177a7c

Branch: refs/heads/4.14-cdh5.14
Commit: ab177a7c9425f9395ad28d8a10724edb8f91aaa9
Parents: 56f765c
Author: James Taylor 
Authored: Fri Jul 13 04:01:41 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:42:19 2018 +0100

--
 .../apache/phoenix/compile/DeleteCompiler.java  | 24 
 1 file changed, 19 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ab177a7c/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 78b2db9..5f9c76c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -46,6 +46,7 @@ import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
+import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -480,7 +481,6 @@ public class DeleteCompiler {
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, '"' + column.getName().getString() + '"', null)));
 }
-boolean noQueryReqd = true;
 // Project all non PK indexed columns so that we can do the proper index maintenance
 for (PTable index : table.getIndexes()) {
     IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
@@ -492,8 +492,6 @@ public class DeleteCompiler {
 boolean hasNoColumnFamilies = table.getColumnFamilies().isEmpty();
 PColumn column = hasNoColumnFamilies ? table.getColumnForColumnName(columnName) : table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
 if(!projectedColumns.contains(column)) {
-// We must run a query if any index contains a non pk column
-noQueryReqd = false;
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), '"' + columnName + '"', null)));
 }
@@ -513,7 +511,7 @@ public class DeleteCompiler {
 select = StatementNormalizer.normalize(transformedSelect, resolverToBe);
 }
 final boolean hasPreOrPostProcessing = hasPreProcessing || hasPostProcessing;
-noQueryReqd &= !hasPreOrPostProcessing;
+boolean noQueryReqd = !hasPreOrPostProcessing;
 // No limit and no sub queries, joins, etc in where clause
 // Can't run on same server for transactional data, as we need the row keys for the data
 // that is being upserted for conflict detection purposes.
@@ -552,8 +550,24 @@ public class DeleteCompiler {
 }
 
 runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != PTableType.INDEX;
-
+
+// We need to have all indexed columns available in all immutable indexes in order
+// to generate the delete markers from the query. We also cannot have any filters
+// except for our SkipScanFilter for point lookups.
+// A simple check of the non existence of a where clause in the parse node is not sufficient, as the where clause
+// may have been optimized out. Instead, we check that there's a single SkipScanFilter
+// If we can generate a plan for every index, that means all the required columns are available in every index,
+// hence we can drive the delete from any of the plans.
 noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
+int queryPlanIndex = 0;
+while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
+QueryPlan plan = queryPlans.get(queryPlanIndex++);
+StatementContext context = plan.getContext();
+noQueryReqd &= (!context.getScan().hasFilter()
+
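
The restored comment block spells out the check concretely: the delete may skip running a query only when every candidate plan is a pure point lookup, i.e. its scan either has no filter or carries just a SkipScanFilter. A compact, self-contained sketch of that loop, with placeholder types standing in for Phoenix's QueryPlan and filter classes (not the actual DeleteCompiler code):

import java.util.Arrays;
import java.util.List;

public class PointLookupCheckSketch {
    interface Filter {}
    static class SkipScanFilter implements Filter {}
    static class RowFilter implements Filter {}
    static class Plan {
        final Filter filter; // null means the scan has no filter at all
        Plan(Filter filter) { this.filter = filter; }
    }

    // True only if every plan's scan has no filter, or just a SkipScanFilter.
    static boolean noQueryRequired(List<Plan> plans) {
        boolean ok = true;
        for (Plan plan : plans) {
            ok &= plan.filter == null || plan.filter instanceof SkipScanFilter;
        }
        return ok;
    }

    public static void main(String[] args) {
        System.out.println(noQueryRequired(Arrays.asList(new Plan(null), new Plan(new SkipScanFilter())))); // true
        System.out.println(noQueryRequired(Arrays.asList(new Plan(new RowFilter()))));                      // false
    }
}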

[32/32] phoenix git commit: PHOENIX-4979 Fix IndexToolForPartialBuildWithNamespaceEnabledIT for branches <= HBase 1.2

2018-10-17 Thread pboado
PHOENIX-4979 Fix IndexToolForPartialBuildWithNamespaceEnabledIT for branches <= HBase 1.2


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/af474b93
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/af474b93
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/af474b93

Branch: refs/heads/4.14-cdh5.14
Commit: af474b93786a786e7fbc1e0eb5b872d3e34f0e4b
Parents: f3a279f
Author: Vincent Poon 
Authored: Wed Oct 17 01:41:26 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:45:04 2018 +0100

--
 .../apache/phoenix/end2end/IndexToolForPartialBuildIT.java| 4 +++-
 .../IndexToolForPartialBuildWithNamespaceEnabledIT.java   | 7 +--
 2 files changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/af474b93/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
index 85209b2..51ccabd 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
@@ -107,7 +107,9 @@ public class IndexToolForPartialBuildIT extends BaseOwnClusterIT {
 @BeforeClass
 public static void doSetup() throws Exception {
 Map<String, String> serverProps = getServerProperties();
-setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), ReadOnlyProps.EMPTY_PROPS);
+Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
+clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
+setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
 }
 
 @Parameters(name="localIndex = {0}")

http://git-wip-us.apache.org/repos/asf/phoenix/blob/af474b93/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildWithNamespaceEnabledIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildWithNamespaceEnabledIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildWithNamespaceEnabledIT.java
index a8c1f1e..aae51ca 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildWithNamespaceEnabledIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildWithNamespaceEnabledIT.java
@@ -21,6 +21,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Map;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
@@ -46,13 +47,15 @@ public class IndexToolForPartialBuildWithNamespaceEnabledIT extends IndexToolFor
 serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
 Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
 clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
+clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
 setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
 }
 
 @Parameters(name="localIndex = {0} , isNamespaceEnabled = {1}")
 public static Collection data() {
-return Arrays.asList(new Boolean[][] { 
- { false, true},{ true, false }
+return Arrays.asList(new Boolean[][] {
+ { false, true},
+ { true, true }
});
 }
 



[29/32] phoenix git commit: PHOENIX-4957 Add NO_INDEX hint so IndexTool uses the data table instead of index table

2018-10-17 Thread pboado
PHOENIX-4957 Add NO_INDEX hint so IndexTool uses the data table instead of index table


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0abc9ca2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0abc9ca2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0abc9ca2

Branch: refs/heads/4.14-cdh5.13
Commit: 0abc9ca2f83d02c5f43b79a62cdf65ec4309d2a5
Parents: 81d679e
Author: Vincent Poon 
Authored: Tue Oct 9 00:38:09 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:38:41 2018 +0100

--
 .../phoenix/compile/PostIndexDDLCompiler.java   |  2 +-
 .../compile/PostIndexDDLCompilerTest.java   | 68 
 2 files changed, 69 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0abc9ca2/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
index b3cedf6..1bd3aed 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostIndexDDLCompiler.java
@@ -104,7 +104,7 @@ public class PostIndexDDLCompiler {
 updateStmtStr.append("UPSERT /*+ NO_INDEX */ INTO 
").append(schemaName.length() == 0 ? "" : '"' + schemaName + 
"\".").append('"').append(tableName).append("\"(")
.append(indexColumns).append(") ");
 final StringBuilder selectQueryBuilder = new StringBuilder();
-selectQueryBuilder.append(" SELECT ").append(dataColumns).append(" 
FROM ")
+selectQueryBuilder.append(" SELECT /*+ NO_INDEX */ 
").append(dataColumns).append(" FROM ")
 .append(schemaName.length() == 0 ? "" : '"' + schemaName + 
"\".").append('"').append(dataTable.getTableName().getString()).append('"');
 this.selectQuery = selectQueryBuilder.toString();
 updateStmtStr.append(this.selectQuery);
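
For clarity, the compiled statement now carries the hint on both sides, so neither the UPSERT's target resolution nor the source SELECT is optimized onto an index table. A hypothetical rendering of the resulting statement shape (table and column names invented for illustration):

public class NoIndexHintDemo {
    public static void main(String[] args) {
        String table = "\"MY_TABLE\"";
        // Both halves carry NO_INDEX so the rebuild reads the data table.
        String stmt = "UPSERT /*+ NO_INDEX */ INTO " + table + "(PK, V1)"
                + " SELECT /*+ NO_INDEX */ PK, V1 FROM " + table;
        System.out.println(stmt);
    }
}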

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0abc9ca2/phoenix-core/src/test/java/org/apache/phoenix/compile/PostIndexDDLCompilerTest.java
--
diff --git 
a/phoenix-core/src/test/java/org/apache/phoenix/compile/PostIndexDDLCompilerTest.java
 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/PostIndexDDLCompilerTest.java
new file mode 100644
index 000..9df2fec
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/phoenix/compile/PostIndexDDLCompilerTest.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compile;
+
+import static org.junit.Assert.assertEquals;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.query.BaseConnectionlessQueryTest;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.TableRef;
+import org.junit.Test;
+
+public class PostIndexDDLCompilerTest extends BaseConnectionlessQueryTest {
+
+@Test
+public void testHintInSubquery() throws Exception {
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+setupTables(conn);
+PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+String query = "UPSERT /*+ NO_INDEX */ INTO T(k, v1) SELECT /*+ NO_INDEX */  k,v1 FROM T WHERE v1 = '4'";
+MutationPlan plan = stmt.compileMutation(query);
+assertEquals("T", plan.getQueryPlan().getTableRef().getTable().getTableName().getString());
+query = "UPSERT INTO T(k, v1) SELECT /*+ NO_INDEX */  k,v1 FROM T WHERE v1 = '4'";
+plan = stmt.compileMutation(query);
+// TODO the following 

[21/32] phoenix git commit: Set version to 4.14.1-cdh5.13 for release

2018-10-17 Thread pboado
Set version to 4.14.1-cdh5.13 for release


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/73e6629b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/73e6629b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/73e6629b

Branch: refs/heads/4.14-cdh5.13
Commit: 73e6629b0f84c4e0f6d6f8726ca0700f61cf1a39
Parents: a026cd3
Author: Vincent Poon 
Authored: Fri Aug 3 22:38:44 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:38:14 2018 +0100

--
 phoenix-assembly/pom.xml   | 2 +-
 phoenix-client/pom.xml | 2 +-
 phoenix-core/pom.xml   | 2 +-
 phoenix-flume/pom.xml  | 2 +-
 phoenix-hive/pom.xml   | 2 +-
 phoenix-kafka/pom.xml  | 2 +-
 phoenix-load-balancer/pom.xml  | 2 +-
 phoenix-parcel/pom.xml | 4 ++--
 phoenix-pherf/pom.xml  | 2 +-
 phoenix-pig/pom.xml| 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml| 2 +-
 phoenix-server/pom.xml | 2 +-
 phoenix-spark/pom.xml  | 2 +-
 phoenix-tracing-webapp/pom.xml | 2 +-
 pom.xml| 2 +-
 16 files changed, 17 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/73e6629b/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 560c1c0..b0ff1a7 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.0-cdh5.13.2
+4.14.1-cdh5.13.2
   
   phoenix-assembly
   Phoenix Assembly

http://git-wip-us.apache.org/repos/asf/phoenix/blob/73e6629b/phoenix-client/pom.xml
--
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 6ed997e..84bb712 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.0-cdh5.13.2
+4.14.1-cdh5.13.2
   
   phoenix-client
   Phoenix Client

http://git-wip-us.apache.org/repos/asf/phoenix/blob/73e6629b/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 753b5a9..6f36158 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.0-cdh5.13.2
+4.14.1-cdh5.13.2
   
   phoenix-core
   Phoenix Core

http://git-wip-us.apache.org/repos/asf/phoenix/blob/73e6629b/phoenix-flume/pom.xml
--
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 1f2e8c4..e6152b0 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.0-cdh5.13.2
+4.14.1-cdh5.13.2
   
   phoenix-flume
   Phoenix - Flume

http://git-wip-us.apache.org/repos/asf/phoenix/blob/73e6629b/phoenix-hive/pom.xml
--
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 8de911d..045538d 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.0-cdh5.13.2
+4.14.1-cdh5.13.2
   
   phoenix-hive
   Phoenix - Hive

http://git-wip-us.apache.org/repos/asf/phoenix/blob/73e6629b/phoenix-kafka/pom.xml
--
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 0d99b22..fde8835 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@

org.apache.phoenix
phoenix
-   4.14.0-cdh5.13.2
+   4.14.1-cdh5.13.2

phoenix-kafka
Phoenix - Kafka

http://git-wip-us.apache.org/repos/asf/phoenix/blob/73e6629b/phoenix-load-balancer/pom.xml
--
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index 6b4be37..498199e 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
   
 org.apache.phoenix
 phoenix
-4.14.0-cdh5.13.2
+4.14.1-cdh5.13.2
   
   phoenix-load-balancer
   Phoenix Load Balancer

http://git-wip-us.apache.org/repos/asf/phoenix/blob/73e6629b/phoenix-parcel/pom.xml
--
diff --git a/phoenix-parcel/pom.xml b/phoenix-parcel/pom.xml
index 41c782e..5946cb8 100644
--- a/phoenix-parcel/pom.xml
+++ b/phoenix-parcel/pom.xml
@@ -27,7 +27,7 @@
   
 

[23/32] phoenix git commit: PHOENIX-4843 InListExpression toString() converts the values in the list to ASC sort order always

2018-10-17 Thread pboado
PHOENIX-4843 InListExpression toString() converts the values in the list to ASC sort order always


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7aa63af2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7aa63af2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7aa63af2

Branch: refs/heads/4.14-cdh5.13
Commit: 7aa63af2205f168380bcb7a1f97b1562b512f375
Parents: d9c80f8
Author: Thomas D'Silva 
Authored: Fri Aug 10 01:33:09 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:38:24 2018 +0100

--
 .../org/apache/phoenix/end2end/InListIT.java| 72 
 .../phoenix/expression/InListExpression.java| 11 ++-
 2 files changed, 80 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7aa63af2/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
index fe88dc8..2820fdd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.end2end;
 
 import static java.util.Collections.singletonList;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -27,6 +28,7 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -46,6 +48,8 @@ import com.google.common.collect.Lists;
 
 
 public class InListIT extends ParallelStatsDisabledIT {
+
+private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + TENANT_ID_ATTRIB + "=tenant1";
 
 @Test
 public void testLeadingPKWithTrailingRVC() throws Exception {
@@ -481,5 +485,73 @@ public class InListIT extends ParallelStatsDisabledIT {
 
 conn.close();
 }
+
+@Test
+public void testInListExpressionWithDesc() throws Exception {
+String fullTableName = generateUniqueName();
+String fullViewName = generateUniqueName();
+String tenantView = generateUniqueName();
+// create base table and global view using global connection
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE " + fullTableName + "(\n" + 
+"TENANT_ID CHAR(15) NOT NULL,\n" + 
+"KEY_PREFIX CHAR(3) NOT NULL,\n" + 
+"CREATED_DATE DATE,\n" + 
+"CREATED_BY CHAR(15),\n" + 
+"SYSTEM_MODSTAMP DATE\n" + 
+"CONSTRAINT PK PRIMARY KEY (\n" + 
+"   TENANT_ID," + 
+"   KEY_PREFIX" + 
+")) MULTI_TENANT=TRUE");
+
+stmt.execute("CREATE VIEW " + fullViewName + "(\n" + 
+"MODEL VARCHAR NOT NULL,\n" + 
+"MILEAGE  BIGINT NOT NULL,\n" +  
+"MILES_DRIVEN BIGINT NOT NULL,\n" + 
+"MAKE VARCHAR,\n" + 
+"CONSTRAINT PKVIEW PRIMARY KEY\n" + 
+"(\n" + 
+"MODEL, MILEAGE DESC, MILES_DRIVEN\n" + 
+")) AS SELECT * FROM " + fullTableName + " WHERE 
KEY_PREFIX = '0CY'");
+
+}
+
+// create and use a tenant specific view to write data
+try (Connection viewConn = DriverManager.getConnection(TENANT_SPECIFIC_URL1)) {
+Statement stmt = viewConn.createStatement();
+stmt.execute("CREATE VIEW IF NOT EXISTS " + tenantView + " AS SELECT * FROM " + fullViewName);
+viewConn.createStatement().execute("UPSERT INTO " + tenantView + 
"(CREATED_BY, CREATED_DATE, SYSTEM_MODSTAMP, MODEL, MILEAGE, MILES_DRIVEN, 
MAKE) VALUES ('005xx01Sv6o', 1532458254819, 1532458254819, 'a5', 23, 1, 
'AUDI')");
+viewConn.createStatement().execute("UPSERT INTO " + tenantView + 
"(CREATED_BY, CREATED_DATE, SYSTEM_MODSTAMP, MODEL, MILEAGE, MILES_DRIVEN, 
MAKE) VALUES ('005xx01Sv6o', 1532458254819, 1532458254819, 'a4', 27, 3, 
'AUDI')");
+viewConn.createStatement().execute("UPSERT INTO " + tenantView + 
"(CREATED_BY, CREATED_DATE, SYSTEM_MODSTAMP, MODEL, MILEAGE, MILES_DRIVEN, 

[19/32] phoenix git commit: PHOENIX-4797 file not found or file exist exception when create global index use -snapshot option

2018-10-17 Thread pboado
PHOENIX-4797 file not found or file exist exception when create global index use -snapshot option


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ac2725d6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ac2725d6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ac2725d6

Branch: refs/heads/4.14-cdh5.13
Commit: ac2725d65fc98ac41dc347bf3c9b0ae66f5c9f6c
Parents: 5db8699
Author: 492066199 <492066...@qq.com>
Authored: Fri Jul 6 03:45:38 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:36:19 2018 +0100

--
 .../org/apache/phoenix/iterate/TableSnapshotResultIterator.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ac2725d6/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
index df60339..016d3be 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
@@ -39,6 +39,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.UUID;
 
 public class TableSnapshotResultIterator implements ResultIterator {
 
@@ -65,7 +66,8 @@ public class TableSnapshotResultIterator implements ResultIterator {
 this.scan = scan;
 this.scanMetricsHolder = scanMetricsHolder;
 this.scanIterator = UNINITIALIZED_SCANNER;
-this.restoreDir = new Path(configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY));
+this.restoreDir = new Path(configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY),
+        UUID.randomUUID().toString());
 this.snapshotName = configuration.get(
 PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY);
 this.rootDir = FSUtils.getRootDir(configuration);
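
The fix itself is small: each snapshot-restore run gets its own random subdirectory, so repeated or concurrent -snapshot index builds can no longer collide on a shared restore path. A standalone sketch of the idea (the base path is invented; only the random-UUID suffix mirrors the patch):

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.UUID;

public class UniqueRestoreDir {
    public static void main(String[] args) {
        Path base = Paths.get("/tmp/phoenix-snapshot-restore"); // hypothetical base dir
        // Each run appends a random UUID, so two runs never share a directory.
        Path restoreDir = base.resolve(UUID.randomUUID().toString());
        System.out.println(restoreDir);
    }
}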



[22/32] phoenix git commit: PHOENIX-4848 - Do not propagate unrequired metadata changes and handle missing/corrupted child views

2018-10-17 Thread pboado
PHOENIX-4848 - Do not propagate unrequired metadata changes and handle missing/corrupted child views

This patch fixes two related issues in propagating metadata changes to views.
The first issue is a bug in the logic to determine if a given change
should be propagated.
The second issue is in handling missing or corrupted views while attempting
to propagate a change. If a view is missing or corrupted, this patch simply
ignores the view by catching its loadTable() exception and logging it.
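
A self-contained sketch of the second part of the fix (all names here are illustrative, not the MetaDataEndpointImpl code): the propagation loop catches a per-view load failure, logs it, and continues with the remaining views instead of aborting.

import java.util.Arrays;
import java.util.List;
import java.util.logging.Logger;

public class ViewPropagationSketch {
    private static final Logger LOG = Logger.getLogger(ViewPropagationSketch.class.getName());

    static class ViewRef {
        final String name;
        final boolean corrupted;
        ViewRef(String name, boolean corrupted) { this.name = name; this.corrupted = corrupted; }
    }

    // Stand-in for loading a child view and applying the base-table change.
    static void loadAndApply(ViewRef view) {
        if (view.corrupted) {
            throw new IllegalStateException("cannot load view " + view.name);
        }
        System.out.println("propagated change to " + view.name);
    }

    public static void main(String[] args) {
        List<ViewRef> childViews = Arrays.asList(
                new ViewRef("V1", false), new ViewRef("V2", true), new ViewRef("V3", false));
        for (ViewRef view : childViews) {
            try {
                loadAndApply(view);
            } catch (RuntimeException e) {
                // Log and skip the broken view; keep propagating to the rest.
                LOG.warning("Skipping child view " + view.name + ": " + e.getMessage());
            }
        }
    }
}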


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d9c80f8c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d9c80f8c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d9c80f8c

Branch: refs/heads/4.14-cdh5.13
Commit: d9c80f8cd905a085d07aead91ddb10cbda86f099
Parents: 73e6629
Author: Kadir 
Authored: Wed Aug 15 18:34:17 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:38:18 2018 +0100

--
 .../phoenix/end2end/AlterTableWithViewsIT.java  | 128 ++-
 .../coprocessor/MetaDataEndpointImpl.java   |  19 ++-
 2 files changed, 138 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d9c80f8c/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
index ab3a4ab..c4e4995 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
@@ -36,6 +36,7 @@ import java.util.Collection;
 import java.util.Properties;
 
 import org.apache.commons.lang.ArrayUtils;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.TephraTransactionalProcessor;
@@ -49,6 +50,7 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Test;
@@ -100,7 +102,7 @@ public class AlterTableWithViewsIT extends ParallelStatsDisabledIT {
     @Test
     public void testAddNewColumnsToBaseTableWithViews() throws Exception {
         try (Connection conn = DriverManager.getConnection(getUrl());
-                Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn ) {   
+                Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn ) {
             String tableName = generateUniqueName();
             String viewOfTable = tableName + "_VIEW";
             String ddlFormat = "CREATE TABLE IF NOT EXISTS " + tableName + " ("
@@ -111,17 +113,64 @@ public class AlterTableWithViewsIT extends ParallelStatsDisabledIT {
                             + " ) %s";
             conn.createStatement().execute(generateDDL(ddlFormat));
             assertTableDefinition(conn, tableName, PTableType.TABLE, null, 0, 3, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, "ID", "COL1", "COL2");
-            
+
             viewConn.createStatement().execute("CREATE VIEW " + viewOfTable + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 VARCHAR ) AS SELECT * FROM " + tableName);
             assertTableDefinition(conn, viewOfTable, PTableType.VIEW, tableName, 0, 5, 3, "ID", "COL1", "COL2", "VIEW_COL1", "VIEW_COL2");
-            
+
             // adding a new pk column and a new regular column
             conn.createStatement().execute("ALTER TABLE " + tableName + " ADD COL3 varchar(10) PRIMARY KEY, COL4 integer");
             assertTableDefinition(conn, tableName, PTableType.TABLE, null, columnEncoded ? 2 : 1, 5, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, "ID", "COL1", "COL2", "COL3", "COL4");
             assertTableDefinition(conn, viewOfTable, PTableType.VIEW, tableName, 1, 7, 5, "ID", "COL1", "COL2", "COL3", "COL4", "VIEW_COL1", "VIEW_COL2");
-        } 
+        }
     }
-    
+
+    @Test
+    public void testAddNewColumnsToBaseTableWithVCorruptedViews() throws Exception {
+        try (Connection conn = DriverManager.getConnection(getUrl());
+             Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn ) {
+            String tableName = generateUniqueName();
+            String viewOfTable = tableName + "_VIEW";
+            String ddlFormat = "CREATE TABLE IF NOT EXISTS " + 

[16/32] phoenix git commit: Revert "PHOENIX-4790 Addendum to check that query is a point lookup for delete not to run query"

2018-10-17 Thread pboado
Revert "PHOENIX-4790 Addendum to check that query is a point lookup for delete 
not to run query"

This reverts commit 2920dfa19101e395293460ae1e5575137c6628c9.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/045f4c61
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/045f4c61
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/045f4c61

Branch: refs/heads/4.14-cdh5.13
Commit: 045f4c6167dd47dd96b22db11d19a783df325c20
Parents: 656df7e
Author: James Taylor 
Authored: Fri Jul 13 04:01:21 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:36:10 2018 +0100

--
 .../src/main/java/org/apache/phoenix/compile/DeleteCompiler.java  | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/045f4c61/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index a214c24..78b2db9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -541,9 +541,6 @@ public class DeleteCompiler {
         Iterator<QueryPlan> iterator = queryPlans.iterator();
         while (iterator.hasNext()) {
             QueryPlan plan = iterator.next();
-            // Must be a point lookup in order to not run a query since
-            // we have to have the full key be enumerated.
-            noQueryReqd &= plan.getContext().getScanRanges().isPointLookup();
             if (plan.getTableRef().getTable().getIndexType() == IndexType.LOCAL) {
                 if (!plan.getContext().getDataColumns().isEmpty()) {
                     iterator.remove();



[20/32] phoenix git commit: PHOENIX-4818 Fix RAT check for missing licenses

2018-10-17 Thread pboado
PHOENIX-4818 Fix RAT check for missing licenses


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eccdc9c3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eccdc9c3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eccdc9c3

Branch: refs/heads/4.14-cdh5.12
Commit: eccdc9c39ff9e2aee1935f163614fb2e48feca1d
Parents: 1e2297b
Author: Vincent Poon 
Authored: Sun Jul 22 22:19:15 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:28:53 2018 +0100

--
 .../apache/phoenix/query/ConnectionCachingIT.java  | 17 +
 1 file changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/eccdc9c3/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
index b2ef052..d1dda04 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.query;
 
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;



[22/32] phoenix git commit: Set version to 4.14.1-cdh5.12 for release

2018-10-17 Thread pboado
Set version to 4.14.1-cdh5.12 for release


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3dad0aa6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3dad0aa6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3dad0aa6

Branch: refs/heads/4.14-cdh5.12
Commit: 3dad0aa66469de4b67e679410afbbaec139ea951
Parents: eccdc9c
Author: Vincent Poon 
Authored: Fri Aug 3 22:38:44 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:31:31 2018 +0100

--
 phoenix-assembly/pom.xml   | 2 +-
 phoenix-client/pom.xml | 2 +-
 phoenix-core/pom.xml   | 2 +-
 phoenix-flume/pom.xml  | 2 +-
 phoenix-hive/pom.xml   | 2 +-
 phoenix-kafka/pom.xml  | 2 +-
 phoenix-load-balancer/pom.xml  | 2 +-
 phoenix-parcel/pom.xml | 4 ++--
 phoenix-pherf/pom.xml  | 2 +-
 phoenix-pig/pom.xml| 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml| 2 +-
 phoenix-server/pom.xml | 2 +-
 phoenix-spark/pom.xml  | 2 +-
 phoenix-tracing-webapp/pom.xml | 2 +-
 pom.xml| 2 +-
 16 files changed, 17 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3dad0aa6/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 47b593f..60a8baf 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.12.2</version>
+    <version>4.14.1-cdh5.12.2</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3dad0aa6/phoenix-client/pom.xml
--
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 5736f9b..dfbce58 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.12.2</version>
+    <version>4.14.1-cdh5.12.2</version>
   </parent>
   <artifactId>phoenix-client</artifactId>
   <name>Phoenix Client</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3dad0aa6/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index ad429bf..37ea4b0 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.12.2</version>
+    <version>4.14.1-cdh5.12.2</version>
   </parent>
   <artifactId>phoenix-core</artifactId>
   <name>Phoenix Core</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3dad0aa6/phoenix-flume/pom.xml
--
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index bb2654c..d3e8c0a 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.12.2</version>
+    <version>4.14.1-cdh5.12.2</version>
   </parent>
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3dad0aa6/phoenix-hive/pom.xml
--
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 235f919..baa3e3a 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.12.2</version>
+    <version>4.14.1-cdh5.12.2</version>
   </parent>
   <artifactId>phoenix-hive</artifactId>
   <name>Phoenix - Hive</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3dad0aa6/phoenix-kafka/pom.xml
--
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 0dcfaf9..092f88c 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
 	<parent>
 		<groupId>org.apache.phoenix</groupId>
 		<artifactId>phoenix</artifactId>
-		<version>4.14.0-cdh5.12.2</version>
+		<version>4.14.1-cdh5.12.2</version>
 	</parent>
 	<artifactId>phoenix-kafka</artifactId>
 	<name>Phoenix - Kafka</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3dad0aa6/phoenix-load-balancer/pom.xml
--
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index 2f69a7e..910abf3 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.12.2</version>
+    <version>4.14.1-cdh5.12.2</version>
   </parent>
   <artifactId>phoenix-load-balancer</artifactId>
   <name>Phoenix Load Balancer</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/3dad0aa6/phoenix-parcel/pom.xml
--
diff --git a/phoenix-parcel/pom.xml b/phoenix-parcel/pom.xml
index 9835b8a..3b671e1 100644
--- a/phoenix-parcel/pom.xml
+++ b/phoenix-parcel/pom.xml
@@ -27,7 +27,7 @@
   
 

[15/32] phoenix git commit: PHOENIX-4809 Only cache PhoenixConnections when lease renewal is on

2018-10-17 Thread pboado
PHOENIX-4809 Only cache PhoenixConnections when lease renewal is on

Lease renewal is the only mechanism under which connections are removed
from the connectionQueue. Calling close() on a connection doesn't proactively
remove it from the instance of ConnectionQueryServicesImpl.
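
A minimal, runnable sketch of the invariant this establishes (names assumed; the real logic lives in ConnectionQueryServicesImpl): a connection is only queued for lease renewal when renewal is enabled, because renewal is the only path that ever drains the queue.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class ConnectionCacheSketch {
    private final boolean renewLeaseEnabled;
    private final Queue<Object> connectionQueue = new ConcurrentLinkedQueue<>();

    ConnectionCacheSketch(boolean renewLeaseEnabled) {
        this.renewLeaseEnabled = renewLeaseEnabled;
    }

    void addConnection(Object connection) {
        // Without this guard, closed connections linger and the queue grows unbounded.
        if (renewLeaseEnabled) {
            connectionQueue.add(connection);
        }
    }

    public static void main(String[] args) {
        ConnectionCacheSketch cache = new ConnectionCacheSketch(false);
        for (int i = 0; i < 10_000; i++) {
            cache.addConnection(new Object());
        }
        System.out.println("cached=" + cache.connectionQueue.size()); // prints cached=0
    }
}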


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e70d8552
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e70d8552
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e70d8552

Branch: refs/heads/4.14-cdh5.12
Commit: e70d85529ecd23e1d363b4b50d76effc9ff98298
Parents: 8697738
Author: Josh Elser 
Authored: Wed Jul 11 22:02:46 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:28:37 2018 +0100

--
 .../phoenix/query/ConnectionCachingIT.java  | 87 
 .../query/ConnectionQueryServicesImpl.java  | 11 ++-
 2 files changed, 97 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e70d8552/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
new file mode 100644
index 0000000..b2ef052
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
@@ -0,0 +1,87 @@
+package org.apache.phoenix.query;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.ref.WeakReference;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Arrays;
+import java.util.Properties;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.phoenix.end2end.ParallelStatsEnabledIT;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.ConnectionQueryServicesImpl;
+import org.apache.phoenix.query.DelegateConnectionQueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@RunWith(Parameterized.class)
+public class ConnectionCachingIT extends ParallelStatsEnabledIT {
+  private static final Logger LOG = LoggerFactory.getLogger(ConnectionCachingIT.class);
+
+  @Parameters(name= "phoenix.scanner.lease.renew.enabled={0}")
+  public static Iterable<String> data() {
+    return Arrays.asList("true", "false");
+  }
+
+  private String leaseRenewal;
+
+  public ConnectionCachingIT(String leaseRenewalValue) {
+    this.leaseRenewal = leaseRenewalValue;
+  }
+
+  @Test
+  public void test() throws Exception {
+    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+    props.put("phoenix.scanner.lease.renew.enabled", leaseRenewal);
+
+    // The test driver works correctly, the real one doesn't.
+    String url = getUrl();
+    url = url.replace(";" + PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM, "");
+    LOG.info("URL to use is: {}", url);
+
+    Connection conn = DriverManager.getConnection(url, props);
+    long before = getNumCachedConnections(conn);
+    for (int i = 0; i < 10_000; i++) {
+      Connection c = DriverManager.getConnection(url, props);
+      c.close();
+    }
+    Thread.sleep(QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS / 2);
+    long after = getNumCachedConnections(conn);
+    for (int i = 0; i < 6; i++) {
+      LOG.info("Found {} connections cached", after);
+      if (after <= before) {
+        break;
+      }
+      Thread.sleep(QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS / 2);
+      after = getNumCachedConnections(conn);
+    }
+    assertTrue("Saw " + before + " connections, but ended with " + after, after <= before);
+  }
+
+  long getNumCachedConnections(Connection conn) throws Exception {
+    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+    ConnectionQueryServices cqs = pConn.getQueryServices();
+    // For whatever reason, we sometimes get a delegate here, and sometimes the real thing.
+    if (cqs instanceof DelegateConnectionQueryServices) {
+      cqs = ((DelegateConnectionQueryServices) cqs).getDelegate();
+    }
+    assertTrue("ConnectionQueryServices was a " + cqs.getClass(), cqs instanceof ConnectionQueryServicesImpl);
+    ConnectionQueryServicesImpl cqsi = (ConnectionQueryServicesImpl) cqs;
+    long cachedConnections = 0L;
+    for 

[19/32] phoenix git commit: PHOENIX-4797 File not found or file exists exception when creating a global index with the -snapshot option

2018-10-17 Thread pboado
PHOENIX-4797 File not found or file exists exception when creating a global index with the -snapshot option
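
The one-line fix below appends a random UUID to the configured restore directory, giving each snapshot reader a private path so concurrent jobs restoring the same snapshot cannot trip over each other's files. A small illustrative sketch, assuming hadoop-common on the classpath and an arbitrary base directory:

import java.util.UUID;
import org.apache.hadoop.fs.Path;

public class RestoreDirSketch {
    public static void main(String[] args) {
        Path base = new Path("/tmp/phoenix-restore");                  // stands in for RESTORE_DIR_KEY
        Path perReader = new Path(base, UUID.randomUUID().toString()); // unique per iterator instance
        System.out.println("restore dir: " + perReader);
    }
}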


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3420a81b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3420a81b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3420a81b

Branch: refs/heads/4.14-cdh5.14
Commit: 3420a81bb0dbf1d2b3f1802a8d68829fff4bb091
Parents: 5660452
Author: 492066199 <492066...@qq.com>
Authored: Fri Jul 6 03:45:38 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:42:26 2018 +0100

--
 .../org/apache/phoenix/iterate/TableSnapshotResultIterator.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3420a81b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
index df60339..016d3be 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
@@ -39,6 +39,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.UUID;
 
 public class TableSnapshotResultIterator implements ResultIterator {
 
@@ -65,7 +66,8 @@ public class TableSnapshotResultIterator implements ResultIterator {
         this.scan = scan;
         this.scanMetricsHolder = scanMetricsHolder;
         this.scanIterator = UNINITIALIZED_SCANNER;
-        this.restoreDir = new Path(configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY));
+        this.restoreDir = new Path(configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY),
+            UUID.randomUUID().toString());
         this.snapshotName = configuration.get(
                 PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY);
         this.rootDir = FSUtils.getRootDir(configuration);



[13/32] phoenix git commit: PHOENIX-3383 Comparison between descending row keys used in RVC is reverse

2018-10-17 Thread pboado
PHOENIX-3383 Comparison between descending row keys used in RVC is reverse
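
Judging from the test change below, the user-visible effect is that a row value constructor over DESC primary-key columns now compares by logical value, so the operator no longer has to be written backwards. A hedged JDBC sketch (the URL and table are placeholders modeled on the test, not fixed names):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class RvcDescSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // score and entity_id are stored DESC; after the fix the natural '<'
            // pages backwards through the key, instead of a reversed '>'.
            ResultSet rs = stmt.executeQuery(
                "SELECT entity_id, score FROM T" +
                " WHERE organization_id = 'org1'" +
                " AND (score, entity_id) < (2, '04')" +
                " ORDER BY score DESC, entity_id DESC LIMIT 3");
            while (rs.next()) {
                System.out.println(rs.getString(1) + " " + rs.getDouble(2));
            }
        }
    }
}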


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0e612b2b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0e612b2b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0e612b2b

Branch: refs/heads/4.14-cdh5.14
Commit: 0e612b2bab54751f99ec6830a0526e8efc6882a5
Parents: 48b5fe6
Author: James Taylor 
Authored: Fri Jul 6 05:38:28 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:42:09 2018 +0100

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java |   12 +-
 .../org/apache/phoenix/compile/KeyPart.java |2 -
 .../org/apache/phoenix/compile/ScanRanges.java  |   77 +-
 .../apache/phoenix/compile/WhereOptimizer.java  | 1304 +-
 .../PhoenixTxIndexMutationGenerator.java|2 +-
 .../expression/function/FunctionExpression.java |   10 +-
 .../expression/function/InvertFunction.java |   19 +-
 .../expression/function/PrefixFunction.java |6 +-
 .../expression/function/RTrimFunction.java  |6 +-
 .../function/RoundDateExpression.java   |   22 +-
 .../function/RoundDecimalExpression.java|7 +-
 .../phoenix/iterate/BaseResultIterators.java|4 +-
 .../apache/phoenix/iterate/ExplainTable.java|   10 -
 .../java/org/apache/phoenix/query/KeyRange.java |   28 +-
 .../org/apache/phoenix/schema/RowKeySchema.java |   78 ++
 .../phoenix/compile/QueryCompilerTest.java  |2 +-
 .../phoenix/compile/QueryOptimizerTest.java |5 +-
 .../TenantSpecificViewIndexCompileTest.java |8 +-
 .../phoenix/compile/WhereOptimizerTest.java |  359 -
 .../RoundFloorCeilExpressionsTest.java  |   59 +-
 .../apache/phoenix/query/KeyRangeClipTest.java  |2 +-
 .../org/apache/phoenix/query/QueryPlanTest.java |8 +-
 .../apache/phoenix/schema/RowKeySchemaTest.java |   48 +
 23 files changed, 1567 insertions(+), 511 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0e612b2b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 9109c12..04272fa 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -372,9 +372,6 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
         }
     }
 
-    // FIXME: this repros PHOENIX-3382, but turned up two more issues:
-    // 1) PHOENIX-3383 Comparison between descending row keys used in RVC is reverse
-    // 2) PHOENIX-3384 Optimize RVC expressions for non leading row key columns
     @Test
     public void testRVCOnDescWithLeadingPKEquality() throws Exception {
         final Connection conn = DriverManager.getConnection(getUrl());
@@ -398,14 +395,11 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
         conn.createStatement().execute("UPSERT INTO " + fullTableName + " VALUES ('org1',1,'02')");
         conn.commit();
 
-        // FIXME: PHOENIX-3383
-        // This comparison is really backwards: it should be (score, entity_id) < (2, '04'),
-        // but because we're matching a descending key, our comparison has to be switched.
         try (Statement stmt = conn.createStatement()) {
             final ResultSet rs = stmt.executeQuery("SELECT entity_id, score\n" + 
                     "FROM " + fullTableName + "\n" + 
                     "WHERE organization_id = 'org1'\n" + 
-                    "AND (score, entity_id) > (2, '04')\n" + 
+                    "AND (score, entity_id) < (2, '04')\n" + 
                     "ORDER BY score DESC, entity_id DESC\n" + 
                     "LIMIT 3");
             assertTrue(rs.next());
@@ -416,13 +410,11 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
             assertEquals(1.0, rs.getDouble(2), 0.001);
             assertFalse(rs.next());
         }
-        // FIXME: PHOENIX-3384
-        // It should not be necessary to specify organization_id in this query
         try (Statement stmt = conn.createStatement()) {
             final ResultSet rs = stmt.executeQuery("SELECT entity_id, score\n" + 
                     "FROM " + fullTableName + "\n" + 
                     "WHERE organization_id = 'org1'\n" + 
-                    "AND (organization_id, score, entity_id) > ('org1', 2, '04')\n" + 
+                    "AND (organization_id, score, entity_id) < ('org1', 2, '04')\n" + 
                     "ORDER BY score DESC, entity_id DESC\n" + 
                     "LIMIT 3");
             assertTrue(rs.next());


[23/32] phoenix git commit: PHOENIX-4843 InListExpression toString() converts the values in the list to ASC sort order always

2018-10-17 Thread pboado
PHOENIX-4843 InListExpression toString() converts the values in the list to ASC 
sort order always
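
For context, a hedged sketch of the kind of query affected, with the view layout borrowed from the test below (connection URL assumed): when a row-key column such as MILEAGE is declared DESC, the IN-list values must be kept in the column's sort order, or the compiled scan and the expression's toString() disagree with the stored data.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class InListDescSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // MILEAGE is part of the view's primary key, declared "MILEAGE DESC";
            // CAR_VIEW is a placeholder name for a view like the one in the test.
            ResultSet rs = stmt.executeQuery(
                "SELECT MODEL, MILEAGE FROM CAR_VIEW WHERE MILEAGE IN (23, 27)");
            while (rs.next()) {
                System.out.println(rs.getString(1) + " " + rs.getLong(2));
            }
        }
    }
}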


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f45fd6c5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f45fd6c5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f45fd6c5

Branch: refs/heads/4.14-cdh5.14
Commit: f45fd6c52b3ec228388363c8f29e60f5affe0488
Parents: 1e32e89
Author: Thomas D'Silva 
Authored: Fri Aug 10 01:33:09 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:44:35 2018 +0100

--
 .../org/apache/phoenix/end2end/InListIT.java| 72 
 .../phoenix/expression/InListExpression.java| 11 ++-
 2 files changed, 80 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f45fd6c5/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
index fe88dc8..2820fdd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InListIT.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.end2end;
 
 import static java.util.Collections.singletonList;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -27,6 +28,7 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -46,6 +48,8 @@ import com.google.common.collect.Lists;
 
 
 public class InListIT extends ParallelStatsDisabledIT {
+
+    private final String TENANT_SPECIFIC_URL1 = getUrl() + ';' + TENANT_ID_ATTRIB + "=tenant1";
 
     @Test
     public void testLeadingPKWithTrailingRVC() throws Exception {
@@ -481,5 +485,73 @@ public class InListIT extends ParallelStatsDisabledIT {
 
         conn.close();
     }
+
+    @Test
+    public void testInListExpressionWithDesc() throws Exception {
+        String fullTableName = generateUniqueName();
+        String fullViewName = generateUniqueName();
+        String tenantView = generateUniqueName();
+        // create base table and global view using global connection
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            Statement stmt = conn.createStatement();
+            stmt.execute("CREATE TABLE " + fullTableName + "(\n" + 
+                    "TENANT_ID CHAR(15) NOT NULL,\n" + 
+                    "KEY_PREFIX CHAR(3) NOT NULL,\n" + 
+                    "CREATED_DATE DATE,\n" + 
+                    "CREATED_BY CHAR(15),\n" + 
+                    "SYSTEM_MODSTAMP DATE\n" + 
+                    "CONSTRAINT PK PRIMARY KEY (\n" + 
+                    "   TENANT_ID," + 
+                    "   KEY_PREFIX" + 
+                    ")) MULTI_TENANT=TRUE");
+
+            stmt.execute("CREATE VIEW " + fullViewName + "(\n" + 
+                    "MODEL VARCHAR NOT NULL,\n" + 
+                    "MILEAGE  BIGINT NOT NULL,\n" +  
+                    "MILES_DRIVEN BIGINT NOT NULL,\n" + 
+                    "MAKE VARCHAR,\n" + 
+                    "CONSTRAINT PKVIEW PRIMARY KEY\n" + 
+                    "(\n" + 
+                    "MODEL, MILEAGE DESC, MILES_DRIVEN\n" + 
+                    ")) AS SELECT * FROM " + fullTableName + " WHERE KEY_PREFIX = '0CY'");
+
+        }
+
+        // create and use a tenant specific view to write data
+        try (Connection viewConn = DriverManager.getConnection(TENANT_SPECIFIC_URL1) ) { 
+            Statement stmt = viewConn.createStatement();
+            stmt.execute("CREATE VIEW IF NOT EXISTS " + tenantView + " AS SELECT * FROM " + fullViewName );
+            viewConn.createStatement().execute("UPSERT INTO " + tenantView + "(CREATED_BY, CREATED_DATE, SYSTEM_MODSTAMP, MODEL, MILEAGE, MILES_DRIVEN, MAKE) VALUES ('005xx01Sv6o', 1532458254819, 1532458254819, 'a5', 23, 1, 'AUDI')");
+            viewConn.createStatement().execute("UPSERT INTO " + tenantView + "(CREATED_BY, CREATED_DATE, SYSTEM_MODSTAMP, MODEL, MILEAGE, MILES_DRIVEN, MAKE) VALUES ('005xx01Sv6o', 1532458254819, 1532458254819, 'a4', 27, 3, 'AUDI')");
+            viewConn.createStatement().execute("UPSERT INTO " + tenantView + "(CREATED_BY, CREATED_DATE, SYSTEM_MODSTAMP, MODEL, MILEAGE, MILES_DRIVEN, 

[14/32] phoenix git commit: PHOENIX-4790 Addendum to check that query is a point lookup for delete not to run query

2018-10-17 Thread pboado
PHOENIX-4790 Addendum to check that query is a point lookup for delete not to 
run query
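
To make the guard concrete, a hedged sketch (table T and its two-column primary key are assumed): only a DELETE whose WHERE clause pins the entire primary key is a point lookup, and only then can the rows be deleted without first running a query to find them.

import java.sql.Connection;
import java.sql.DriverManager;

public class PointLookupDeleteSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Full PK (K1, K2) pinned: a point lookup, so no query needs to run.
            conn.createStatement().execute("DELETE FROM T WHERE K1 = 'a' AND K2 = 1");
            // Only a PK prefix pinned: not a point lookup, so the matching rows
            // must first be found by running the query.
            conn.createStatement().execute("DELETE FROM T WHERE K1 = 'a'");
            conn.commit();
        }
    }
}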


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/07d41385
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/07d41385
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/07d41385

Branch: refs/heads/4.14-cdh5.14
Commit: 07d41385856fbfda0158c30e9c82db6aba1c358e
Parents: 0e612b2
Author: James Taylor 
Authored: Thu Jul 12 06:01:14 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:42:11 2018 +0100

--
 .../src/main/java/org/apache/phoenix/compile/DeleteCompiler.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/07d41385/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 78b2db9..a214c24 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -541,6 +541,9 @@ public class DeleteCompiler {
         Iterator<QueryPlan> iterator = queryPlans.iterator();
         while (iterator.hasNext()) {
             QueryPlan plan = iterator.next();
+            // Must be a point lookup in order to not run a query since
+            // we have to have the full key be enumerated.
+            noQueryReqd &= plan.getContext().getScanRanges().isPointLookup();
             if (plan.getTableRef().getTable().getIndexType() == IndexType.LOCAL) {
                 if (!plan.getContext().getDataColumns().isEmpty()) {
                     iterator.remove();



[31/32] phoenix git commit: PHOENIX-4785 Unable to write to table if index is made active during retry (addendum for test)

2018-10-17 Thread pboado
PHOENIX-4785 Unable to write to table if index is made active during retry 
(addendum for test)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f3a279f6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f3a279f6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f3a279f6

Branch: refs/heads/4.14-cdh5.14
Commit: f3a279f6f75b24e6687fef5989881ab0abda0846
Parents: 161fbf0
Author: Vincent Poon 
Authored: Fri Oct 12 23:49:39 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:45:01 2018 +0100

--
 .../org/apache/phoenix/end2end/index/MutableIndexFailureIT.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f3a279f6/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index aac20ac..46a512f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -423,7 +423,7 @@ public class MutableIndexFailureIT extends BaseTest {
 
     private void addRowsInTableDuringRetry(final String tableName)
             throws SQLException, InterruptedException, ExecutionException {
-        int threads=10;
+        int threads=9;
         boolean wasFailWrite = FailingRegionObserver.FAIL_WRITE;
         boolean wasToggleFailWriteForRetry = FailingRegionObserver.TOGGLE_FAIL_WRITE_FOR_RETRY;
         try {
@@ -601,6 +601,9 @@ public class MutableIndexFailureIT extends BaseTest {
                         }
                     }
                 }
+                if (TOGGLE_FAIL_WRITE_FOR_RETRY) {
+                    FAIL_WRITE = !FAIL_WRITE;
+                }
             }
         }
         if (throwException) {



[21/32] phoenix git commit: Set version to 4.14.1-cdh5.14 for release

2018-10-17 Thread pboado
Set version to 4.14.1-cdh5.14 for release


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/af67eda4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/af67eda4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/af67eda4

Branch: refs/heads/4.14-cdh5.14
Commit: af67eda4d953dad34a6c7c5d4c7a81d761a3924f
Parents: 694d0d5
Author: Vincent Poon 
Authored: Fri Aug 3 22:38:44 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:44:29 2018 +0100

--
 phoenix-assembly/pom.xml   | 2 +-
 phoenix-client/pom.xml | 2 +-
 phoenix-core/pom.xml   | 2 +-
 phoenix-flume/pom.xml  | 2 +-
 phoenix-hive/pom.xml   | 2 +-
 phoenix-kafka/pom.xml  | 2 +-
 phoenix-load-balancer/pom.xml  | 2 +-
 phoenix-parcel/pom.xml | 4 ++--
 phoenix-pherf/pom.xml  | 2 +-
 phoenix-pig/pom.xml| 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml| 2 +-
 phoenix-server/pom.xml | 2 +-
 phoenix-spark/pom.xml  | 2 +-
 phoenix-tracing-webapp/pom.xml | 2 +-
 pom.xml| 2 +-
 16 files changed, 17 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/af67eda4/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index e28d9f7..3d34c42 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
    <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.14.2</version>
+    <version>4.14.1-cdh5.14.2</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/af67eda4/phoenix-client/pom.xml
--
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 9d0dee1..3d522bc 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.14.2</version>
+    <version>4.14.1-cdh5.14.2</version>
   </parent>
   <artifactId>phoenix-client</artifactId>
   <name>Phoenix Client</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/af67eda4/phoenix-core/pom.xml
--
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index ab4c891..c286680 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.14.2</version>
+    <version>4.14.1-cdh5.14.2</version>
   </parent>
   <artifactId>phoenix-core</artifactId>
   <name>Phoenix Core</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/af67eda4/phoenix-flume/pom.xml
--
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index d1e1363..182a5c2 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.14.2</version>
+    <version>4.14.1-cdh5.14.2</version>
   </parent>
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/af67eda4/phoenix-hive/pom.xml
--
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 1cfdcf6..cf3826c 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.14.2</version>
+    <version>4.14.1-cdh5.14.2</version>
   </parent>
   <artifactId>phoenix-hive</artifactId>
   <name>Phoenix - Hive</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/af67eda4/phoenix-kafka/pom.xml
--
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 6b11511..809b9f0 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
 	<parent>
 		<groupId>org.apache.phoenix</groupId>
 		<artifactId>phoenix</artifactId>
-		<version>4.14.0-cdh5.14.2</version>
+		<version>4.14.1-cdh5.14.2</version>
 	</parent>
 	<artifactId>phoenix-kafka</artifactId>
 	<name>Phoenix - Kafka</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/af67eda4/phoenix-load-balancer/pom.xml
--
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index 892477e..3967c22 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.0-cdh5.14.2</version>
+    <version>4.14.1-cdh5.14.2</version>
   </parent>
   <artifactId>phoenix-load-balancer</artifactId>
   <name>Phoenix Load Balancer</name>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/af67eda4/phoenix-parcel/pom.xml
--
diff --git a/phoenix-parcel/pom.xml b/phoenix-parcel/pom.xml
index 08e7734..8581501 100644
--- a/phoenix-parcel/pom.xml
+++ b/phoenix-parcel/pom.xml
@@ -27,7 +27,7 @@
   
 

[22/32] phoenix git commit: PHOENIX-4848 - Do not propagate unrequired metadata changes and handle missing/corrupted child views

2018-10-17 Thread pboado
PHOENIX-4848 - Do not propagate unrequired metadata changes and handle 
missing/corrupted child views

This patch fixes two related issues in propagating metadata changes to views.
The first issue is a bug in the logic to determine if a given change
should be propagated.
The second issue is in handling missing or corrupted views while attempting
to propagate a change. If a view is missing or corrupted, this patch simply
ignores the view by catching its loadTable() exception and logging it.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1e32e89e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1e32e89e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1e32e89e

Branch: refs/heads/4.14-cdh5.14
Commit: 1e32e89e0742e445f7b8c05d0c1043e43c85b21b
Parents: af67eda
Author: Kadir 
Authored: Wed Aug 15 18:34:17 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:44:33 2018 +0100

--
 .../phoenix/end2end/AlterTableWithViewsIT.java  | 128 ++-
 .../coprocessor/MetaDataEndpointImpl.java   |  19 ++-
 2 files changed, 138 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1e32e89e/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
index ab3a4ab..c4e4995 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
@@ -36,6 +36,7 @@ import java.util.Collection;
 import java.util.Properties;
 
 import org.apache.commons.lang.ArrayUtils;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.TephraTransactionalProcessor;
@@ -49,6 +50,7 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Test;
@@ -100,7 +102,7 @@ public class AlterTableWithViewsIT extends ParallelStatsDisabledIT {
     @Test
     public void testAddNewColumnsToBaseTableWithViews() throws Exception {
         try (Connection conn = DriverManager.getConnection(getUrl());
-                Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn ) {   
+                Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn ) {
             String tableName = generateUniqueName();
             String viewOfTable = tableName + "_VIEW";
             String ddlFormat = "CREATE TABLE IF NOT EXISTS " + tableName + " ("
@@ -111,17 +113,64 @@ public class AlterTableWithViewsIT extends ParallelStatsDisabledIT {
                             + " ) %s";
             conn.createStatement().execute(generateDDL(ddlFormat));
             assertTableDefinition(conn, tableName, PTableType.TABLE, null, 0, 3, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, "ID", "COL1", "COL2");
-            
+
             viewConn.createStatement().execute("CREATE VIEW " + viewOfTable + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 VARCHAR ) AS SELECT * FROM " + tableName);
             assertTableDefinition(conn, viewOfTable, PTableType.VIEW, tableName, 0, 5, 3, "ID", "COL1", "COL2", "VIEW_COL1", "VIEW_COL2");
-            
+
             // adding a new pk column and a new regular column
             conn.createStatement().execute("ALTER TABLE " + tableName + " ADD COL3 varchar(10) PRIMARY KEY, COL4 integer");
             assertTableDefinition(conn, tableName, PTableType.TABLE, null, columnEncoded ? 2 : 1, 5, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, "ID", "COL1", "COL2", "COL3", "COL4");
             assertTableDefinition(conn, viewOfTable, PTableType.VIEW, tableName, 1, 7, 5, "ID", "COL1", "COL2", "COL3", "COL4", "VIEW_COL1", "VIEW_COL2");
-        } 
+        }
     }
-    
+
+    @Test
+    public void testAddNewColumnsToBaseTableWithVCorruptedViews() throws Exception {
+        try (Connection conn = DriverManager.getConnection(getUrl());
+             Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn ) {
+            String tableName = generateUniqueName();
+            String viewOfTable = tableName + "_VIEW";
+            String ddlFormat = "CREATE TABLE IF NOT EXISTS " + 

[15/32] phoenix git commit: PHOENIX-4809 Only cache PhoenixConnections when lease renewal is on

2018-10-17 Thread pboado
PHOENIX-4809 Only cache PhoenixConnections when lease renewal is on

Lease renewal is the only mechanism under which connections are removed
from the connectionQueue. Calling close() on a connection doesn't proactively
remove it from the instance of ConnectionQueryServicesImpl.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7f1c1fab
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7f1c1fab
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7f1c1fab

Branch: refs/heads/4.14-cdh5.14
Commit: 7f1c1fab89e7004fec98121b1cfea1f552ba2718
Parents: 07d4138
Author: Josh Elser 
Authored: Wed Jul 11 22:02:46 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:42:14 2018 +0100

--
 .../phoenix/query/ConnectionCachingIT.java  | 87 
 .../query/ConnectionQueryServicesImpl.java  | 11 ++-
 2 files changed, 97 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7f1c1fab/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
new file mode 100644
index 0000000..b2ef052
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
@@ -0,0 +1,87 @@
+package org.apache.phoenix.query;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.ref.WeakReference;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Arrays;
+import java.util.Properties;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.phoenix.end2end.ParallelStatsEnabledIT;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.ConnectionQueryServicesImpl;
+import org.apache.phoenix.query.DelegateConnectionQueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@RunWith(Parameterized.class)
+public class ConnectionCachingIT extends ParallelStatsEnabledIT {
+  private static final Logger LOG = LoggerFactory.getLogger(ConnectionCachingIT.class);
+
+  @Parameters(name= "phoenix.scanner.lease.renew.enabled={0}")
+  public static Iterable<String> data() {
+    return Arrays.asList("true", "false");
+  }
+
+  private String leaseRenewal;
+
+  public ConnectionCachingIT(String leaseRenewalValue) {
+    this.leaseRenewal = leaseRenewalValue;
+  }
+
+  @Test
+  public void test() throws Exception {
+    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+    props.put("phoenix.scanner.lease.renew.enabled", leaseRenewal);
+
+    // The test driver works correctly, the real one doesn't.
+    String url = getUrl();
+    url = url.replace(";" + PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM, "");
+    LOG.info("URL to use is: {}", url);
+
+    Connection conn = DriverManager.getConnection(url, props);
+    long before = getNumCachedConnections(conn);
+    for (int i = 0; i < 10_000; i++) {
+      Connection c = DriverManager.getConnection(url, props);
+      c.close();
+    }
+    Thread.sleep(QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS / 2);
+    long after = getNumCachedConnections(conn);
+    for (int i = 0; i < 6; i++) {
+      LOG.info("Found {} connections cached", after);
+      if (after <= before) {
+        break;
+      }
+      Thread.sleep(QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS / 2);
+      after = getNumCachedConnections(conn);
+    }
+    assertTrue("Saw " + before + " connections, but ended with " + after, after <= before);
+  }
+
+  long getNumCachedConnections(Connection conn) throws Exception {
+    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+    ConnectionQueryServices cqs = pConn.getQueryServices();
+    // For whatever reason, we sometimes get a delegate here, and sometimes the real thing.
+    if (cqs instanceof DelegateConnectionQueryServices) {
+      cqs = ((DelegateConnectionQueryServices) cqs).getDelegate();
+    }
+    assertTrue("ConnectionQueryServices was a " + cqs.getClass(), cqs instanceof ConnectionQueryServicesImpl);
+    ConnectionQueryServicesImpl cqsi = (ConnectionQueryServicesImpl) cqs;
+    long cachedConnections = 0L;
+    for 

[27/32] phoenix git commit: After HBASE-20940 any local index query will open all HFiles of every Region involved in the query.

2018-10-17 Thread pboado
After HBASE-20940 any local index query will open all HFiles of every Region 
involved in the query.
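
A rough, hedged sketch of the resulting logic (names assumed, not the actual RegionScannerFactory code): the per-query hasReferences() probe is dropped, since after HBASE-20940 that probe itself forces every HFile open, and results are instead clamped to the actual scan start key whenever one is known.

import java.util.Arrays;

public class LocalIndexClampSketch {
    public static void main(String[] args) {
        byte[] actualStartKey = {1, 0};
        byte[] candidateRow = {0, 9};
        // Unconditional check replaces the old "hasReferences && ..." guard.
        if (actualStartKey != null && Arrays.compare(candidateRow, actualStartKey) < 0) {
            System.out.println("row precedes scan start; keep scanning forward");
        }
    }
}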


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/87e0164a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/87e0164a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/87e0164a

Branch: refs/heads/4.14-cdh5.14
Commit: 87e0164a6b3bb343b35ed665252debdf7e7ee753
Parents: 9bd4645
Author: Lars Hofhansl 
Authored: Mon Sep 24 06:35:38 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:44:51 2018 +0100

--
 .../phoenix/iterate/RegionScannerFactory.java| 19 +--
 1 file changed, 1 insertion(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/87e0164a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
index aed5805..d81224d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerFactory.java
@@ -37,7 +37,6 @@ import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.KeyValueSchema;
-import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.tuple.*;
 import org.apache.phoenix.transaction.PhoenixTransactionContext;
@@ -45,7 +44,6 @@ import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
-import org.apache.tephra.Transaction;
 
 import java.io.IOException;
 import java.util.List;
@@ -103,25 +101,10 @@ public abstract class RegionScannerFactory {
   final ImmutableBytesWritable ptr, final boolean useQualifierAsListIndex) {
     return new RegionScanner() {
 
-      private boolean hasReferences = checkForReferenceFiles();
       private HRegionInfo regionInfo = env.getRegionInfo();
       private byte[] actualStartKey = getActualStartKey();
       private boolean useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan);
 
-      // If there are any reference files after local index region merge some cases we might
-      // get the records less than scan start row key. This will happen when we replace the
-      // actual region start key with merge region start key. This method gives whether are
-      // there any reference files in the region or not.
-      private boolean checkForReferenceFiles() {
-        if(!ScanUtil.isLocalIndex(scan)) return false;
-        for (byte[] family : scan.getFamilies()) {
-          if (getRegion().getStore(family).hasReferences()) {
-            return true;
-          }
-        }
-        return false;
-      }
-
       // Get the actual scan start row of local index. This will be used to compare the row
       // key of the results less than scan start row when there are references.
       public byte[] getActualStartKey() {
@@ -182,7 +165,7 @@ public abstract class RegionScannerFactory {
         arrayElementCell = result.get(arrayElementCellPosition);
       }
       if (ScanUtil.isLocalIndex(scan) && !ScanUtil.isAnalyzeTable(scan)) {
-        if(hasReferences && actualStartKey!=null) {
+        if(actualStartKey!=null) {
           next = scanTillScanStartRow(s, arrayKVRefs, arrayFuncRefs, result,
               null, arrayElementCell);
           if (result.isEmpty()) {



[16/32] phoenix git commit: Revert "PHOENIX-4790 Addendum to check that query is a point lookup for delete not to run query"

2018-10-17 Thread pboado
Revert "PHOENIX-4790 Addendum to check that query is a point lookup for delete 
not to run query"

This reverts commit 2920dfa19101e395293460ae1e5575137c6628c9.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/56f765c6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/56f765c6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/56f765c6

Branch: refs/heads/4.14-cdh5.14
Commit: 56f765c6bf53e50b488a17d64c63d71ee85617fb
Parents: 7f1c1fa
Author: James Taylor 
Authored: Fri Jul 13 04:01:21 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:42:17 2018 +0100

--
 .../src/main/java/org/apache/phoenix/compile/DeleteCompiler.java  | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/56f765c6/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index a214c24..78b2db9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -541,9 +541,6 @@ public class DeleteCompiler {
         Iterator<QueryPlan> iterator = queryPlans.iterator();
         while (iterator.hasNext()) {
             QueryPlan plan = iterator.next();
-            // Must be a point lookup in order to not run a query since
-            // we have to have the full key be enumerated.
-            noQueryReqd &= plan.getContext().getScanRanges().isPointLookup();
             if (plan.getTableRef().getTable().getIndexType() == IndexType.LOCAL) {
                 if (!plan.getContext().getDataColumns().isEmpty()) {
                     iterator.remove();



[19/32] phoenix git commit: PHOENIX-4797 File not found or file exists exception when creating a global index with the -snapshot option

2018-10-17 Thread pboado
PHOENIX-4797 File not found or file exists exception when creating a global index with the -snapshot option


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c6725956
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c6725956
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c6725956

Branch: refs/heads/4.14-cdh5.11
Commit: c6725956c7ddeb1f677ffef029f5367f334ba14c
Parents: 3493203
Author: 492066199 <492066...@qq.com>
Authored: Fri Jul 6 03:45:38 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:10:18 2018 +0100

--
 .../org/apache/phoenix/iterate/TableSnapshotResultIterator.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c6725956/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
index df60339..016d3be 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableSnapshotResultIterator.java
@@ -39,6 +39,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.UUID;
 
 public class TableSnapshotResultIterator implements ResultIterator {
 
@@ -65,7 +66,8 @@ public class TableSnapshotResultIterator implements ResultIterator {
         this.scan = scan;
         this.scanMetricsHolder = scanMetricsHolder;
         this.scanIterator = UNINITIALIZED_SCANNER;
-        this.restoreDir = new Path(configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY));
+        this.restoreDir = new Path(configuration.get(PhoenixConfigurationUtil.RESTORE_DIR_KEY),
+            UUID.randomUUID().toString());
         this.snapshotName = configuration.get(
                 PhoenixConfigurationUtil.SNAPSHOT_NAME_KEY);
         this.rootDir = FSUtils.getRootDir(configuration);



[25/32] phoenix git commit: PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException (Aman Poonia)

2018-10-17 Thread pboado
PHOENIX-4839 IndexHalfStoreFileReaderGenerator throws NullPointerException 
(Aman Poonia)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8064c640
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8064c640
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8064c640

Branch: refs/heads/4.14-cdh5.11
Commit: 8064c64094f0ff8a83bb1ebb9ec7f22fcad6f417
Parents: 56c63c6
Author: Lars Hofhansl 
Authored: Fri Sep 14 20:40:06 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:23:25 2018 +0100

--
 .../regionserver/IndexHalfStoreFileReader.java  | 48 
 .../IndexHalfStoreFileReaderGenerator.java  | 12 ++---
 2 files changed, 43 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8064c640/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
--
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index 8bd0d72..273a1b0 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_START_ROW_SUFFIX;
+
 import java.io.IOException;
 import java.util.Map;
 
@@ -26,10 +28,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.index.IndexMaintainer;
 
 /**
@@ -56,8 +60,9 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     private final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers;
     private final byte[][] viewConstants;
     private final int offset;
-    private final HRegionInfo regionInfo;
+    private final HRegionInfo childRegionInfo;
     private final byte[] regionStartKeyInHFile;
+    private final HRegionInfo currentRegion;
 
     /**
      * @param fs
@@ -69,17 +74,19 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
      * @param conf
      * @param indexMaintainers
      * @param viewConstants
-     * @param regionInfo
+     * @param childRegionInfo
      * @param regionStartKeyInHFile
      * @param splitKey
+     * @param currentRegion
      * @throws IOException
      */
     public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
             final FSDataInputStreamWrapper in, long size, final Reference r,
             final Configuration conf,
             final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
-            final byte[][] viewConstants, final HRegionInfo regionInfo,
-            byte[] regionStartKeyInHFile, byte[] splitKey) throws IOException {
+            final byte[][] viewConstants, final HRegionInfo childRegionInfo,
+            byte[] regionStartKeyInHFile, byte[] splitKey, HRegionInfo currentRegion)
+            throws IOException {
         super(fs, p, in, size, cacheConf, conf);
         this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
         // Is it top or bottom half?
@@ -87,9 +94,10 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
         this.splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(splitkey));
         this.indexMaintainers = indexMaintainers;
         this.viewConstants = viewConstants;
-        this.regionInfo = regionInfo;
+        this.childRegionInfo = childRegionInfo;
         this.regionStartKeyInHFile = regionStartKeyInHFile;
         this.offset = regionStartKeyInHFile.length;
+        this.currentRegion = currentRegion;
     }
 
     public int getOffset() {
@@ -105,7 +113,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     }
 
     public HRegionInfo getRegionInfo() {
-        return regionInfo;
+        return childRegionInfo;
     }
 
     public byte[] getRegionStartKeyInHFile() {
@@ -125,8 +133,30 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     }
 
     @Override
-    public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt) {
-        return new LocalIndexStoreFileScanner(this, 
-return new LocalIndexStoreFileScanner(this, 

[14/32] phoenix git commit: PHOENIX-4790 Addendum to check that query is a point lookup for delete not to run query

2018-10-17 Thread pboado
PHOENIX-4790 Addendum to check that query is a point lookup for delete not to 
run query


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/abba660d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/abba660d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/abba660d

Branch: refs/heads/4.14-cdh5.13
Commit: abba660ddea22abda4df927643bc63811c7e60d8
Parents: dc383eb
Author: James Taylor 
Authored: Thu Jul 12 06:01:14 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:35:44 2018 +0100

--
 .../src/main/java/org/apache/phoenix/compile/DeleteCompiler.java  | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/abba660d/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 78b2db9..a214c24 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -541,6 +541,9 @@ public class DeleteCompiler {
Iterator<QueryPlan> iterator = queryPlans.iterator();
 while (iterator.hasNext()) {
 QueryPlan plan = iterator.next();
+// Must be a point lookup in order to not run a query since
+// we have to have the full key be enumerated.
+noQueryReqd &= plan.getContext().getScanRanges().isPointLookup();
 if (plan.getTableRef().getTable().getIndexType() == 
IndexType.LOCAL) {
 if (!plan.getContext().getDataColumns().isEmpty()) {
 iterator.remove();

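The one-line addendum above captures a simple rule: a DELETE can be turned directly into delete mutations only when its WHERE clause pins down complete row keys, i.e. the plan's scan ranges form a point lookup; any range condition forces the query to actually run. A minimal sketch of that rule, using the same Phoenix accessors that appear in the diff (the wrapper class and method name are invented for illustration):

    import org.apache.phoenix.compile.QueryPlan;

    final class PointLookupDeleteSketch {
        // Sketch: combine the point-lookup requirement with whatever other
        // conditions DeleteCompiler has already accumulated in noQueryReqd.
        static boolean noQueryRequired(boolean otherChecksPassed, QueryPlan plan) {
            // e.g. DELETE FROM T WHERE pk1 = 1 AND pk2 = 'a' is a point lookup;
            // DELETE FROM T WHERE pk1 > 1 is not and must run the query first.
            return otherChecksPassed
                    && plan.getContext().getScanRanges().isPointLookup();
        }
    }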


[20/32] phoenix git commit: PHOENIX-4818 Fix RAT check for missing licenses

2018-10-17 Thread pboado
PHOENIX-4818 Fix RAT check for missing licenses


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a026cd34
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a026cd34
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a026cd34

Branch: refs/heads/4.14-cdh5.13
Commit: a026cd34f1de5eba9fc195b9e1ee7b6eff419c3e
Parents: ac2725d
Author: Vincent Poon 
Authored: Sun Jul 22 22:19:15 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:36:21 2018 +0100

--
 .../apache/phoenix/query/ConnectionCachingIT.java  | 17 +
 1 file changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a026cd34/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
index b2ef052..d1dda04 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.query;
 
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;



[17/32] phoenix git commit: Revert "PHOENIX-4790 Simplify check for client side delete"

2018-10-17 Thread pboado
Revert "PHOENIX-4790 Simplify check for client side delete"

This reverts commit 5cc9a25a185e596a39e4f2916f90b4c576f4f82f.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a8c2adbe
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a8c2adbe
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a8c2adbe

Branch: refs/heads/4.14-cdh5.13
Commit: a8c2adbee755b2e8b71f4357b3c7124400fb7336
Parents: 045f4c6
Author: James Taylor 
Authored: Fri Jul 13 04:01:41 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:36:13 2018 +0100

--
 .../apache/phoenix/compile/DeleteCompiler.java  | 24 
 1 file changed, 19 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a8c2adbe/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 78b2db9..5f9c76c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -46,6 +46,7 @@ import org.apache.phoenix.execute.AggregatePlan;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.execute.MutationState.MultiRowMutationState;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
+import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -480,7 +481,6 @@ public class DeleteCompiler {
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, FACTORY.column(null, 
'"' + column.getName().getString() + '"', null)));
 }
-boolean noQueryReqd = true;
 // Project all non PK indexed columns so that we can do the proper 
index maintenance
 for (PTable index : table.getIndexes()) {
 IndexMaintainer maintainer = index.getIndexMaintainer(table, 
connection);
@@ -492,8 +492,6 @@ public class DeleteCompiler {
 boolean hasNoColumnFamilies = 
table.getColumnFamilies().isEmpty();
 PColumn column = hasNoColumnFamilies ? 
table.getColumnForColumnName(columnName) : 
table.getColumnFamily(familyName).getPColumnForColumnName(columnName);
 if(!projectedColumns.contains(column)) {
-// We must run a query if any index contains a non pk 
column
-noQueryReqd = false;
 projectedColumns.add(column);
 aliasedNodes.add(FACTORY.aliasedNode(null, 
FACTORY.column(hasNoColumnFamilies ? null : TableName.create(null, familyName), 
'"' + columnName + '"', null)));
 }
@@ -513,7 +511,7 @@ public class DeleteCompiler {
 select = StatementNormalizer.normalize(transformedSelect, 
resolverToBe);
 }
 final boolean hasPreOrPostProcessing = hasPreProcessing || 
hasPostProcessing;
-noQueryReqd &= !hasPreOrPostProcessing;
+boolean noQueryReqd = !hasPreOrPostProcessing;
 // No limit and no sub queries, joins, etc in where clause
 // Can't run on same server for transactional data, as we need the row 
keys for the data
 // that is being upserted for conflict detection purposes.
@@ -552,8 +550,24 @@ public class DeleteCompiler {
 }
 
 runOnServer &= queryPlans.get(0).getTableRef().getTable().getType() != 
PTableType.INDEX;
-
+
+// We need to have all indexed columns available in all immutable 
indexes in order
+// to generate the delete markers from the query. We also cannot have 
any filters
+// except for our SkipScanFilter for point lookups.
+// A simple check of the non existence of a where clause in the parse 
node is not sufficient, as the where clause
+// may have been optimized out. Instead, we check that there's a 
single SkipScanFilter
+// If we can generate a plan for every index, that means all the 
required columns are available in every index,
+// hence we can drive the delete from any of the plans.
 noQueryReqd &= queryPlans.size() == 1 + clientSideIndexes.size();
+int queryPlanIndex = 0;
+while (noQueryReqd && queryPlanIndex < queryPlans.size()) {
+QueryPlan plan = queryPlans.get(queryPlanIndex++);
+StatementContext context = plan.getContext();
+noQueryReqd &= (!context.getScan().hasFilter()
+

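The hunk is cut off above, but the shape of the restored check is clear from the comment block: every candidate plan must carry no filter other than a point-lookup SkipScanFilter. A reconstruction of that loop as a standalone helper, hedged because the tail of the boolean expression is truncated in this archive and is filled in here only approximately:

    import java.util.List;
    import org.apache.phoenix.compile.QueryPlan;
    import org.apache.phoenix.compile.StatementContext;
    import org.apache.phoenix.filter.SkipScanFilter;

    final class DeleteCheckSketch {
        // Sketch: true only if every plan is filter-free (or filtered solely
        // by a SkipScanFilter) and resolves to a point lookup.
        static boolean allPlansArePointLookups(List<QueryPlan> queryPlans) {
            boolean noQueryReqd = true;
            for (QueryPlan plan : queryPlans) {
                StatementContext context = plan.getContext();
                noQueryReqd &= (!context.getScan().hasFilter()
                        || context.getScan().getFilter() instanceof SkipScanFilter)
                        && context.getScanRanges().isPointLookup();
            }
            return noQueryReqd;
        }
    }
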
[26/32] phoenix git commit: PHOENIX-4849 Phoenix may incorrectly replace TableResultIterators after HBase region splits.

2018-10-17 Thread pboado
PHOENIX-4849 Phoenix may incorrectly replace TableResultIterators after HBase region splits.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1acd5fc2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1acd5fc2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1acd5fc2

Branch: refs/heads/4.14-cdh5.13
Commit: 1acd5fc2b0a0bdec77a2f69abf64934290ca47e1
Parents: 0d252ed
Author: Lars Hofhansl 
Authored: Wed Sep 26 19:18:05 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:38:34 2018 +0100

--
 .../end2end/UpsertSelectAutoCommitIT.java   | 22 ++---
 .../apache/phoenix/compile/UpsertCompiler.java  | 17 +++
 .../phoenix/coprocessor/ScanRegionObserver.java |  9 +++-
 .../phoenix/iterate/TableResultIterator.java| 50 +++-
 .../java/org/apache/phoenix/util/ScanUtil.java  |  8 
 5 files changed, 63 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1acd5fc2/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
index 38d48d6..d81c2d0 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
@@ -34,10 +34,13 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.TestUtil;
 import org.junit.Test;
 
 
@@ -161,19 +164,24 @@ public class UpsertSelectAutoCommitIT extends 
ParallelStatsDisabledIT {
 props.setProperty(QueryServices.SCAN_RESULT_CHUNK_SIZE, 
Integer.toString(3));
 Connection conn = DriverManager.getConnection(getUrl(), props);
 conn.setAutoCommit(true);
-conn.createStatement().execute("CREATE SEQUENCE keys");
+conn.createStatement().execute("CREATE SEQUENCE keys CACHE 1000");
 String tableName = generateUniqueName();
-conn.createStatement().execute(
-"CREATE TABLE " + tableName + " (pk INTEGER PRIMARY KEY, val 
INTEGER)");
+conn.createStatement().execute("CREATE TABLE " + tableName
++ " (pk INTEGER PRIMARY KEY, val INTEGER) 
UPDATE_CACHE_FREQUENCY=360");
 
 conn.createStatement().execute(
 "UPSERT INTO " + tableName + " VALUES (NEXT VALUE FOR keys,1)");
-for (int i=0; i<6; i++) {
-Statement stmt = conn.createStatement();
-int upsertCount = stmt.executeUpdate(
-"UPSERT INTO " + tableName + " SELECT NEXT VALUE FOR keys, val 
FROM " + tableName);
+PreparedStatement stmt =
+conn.prepareStatement("UPSERT INTO " + tableName
++ " SELECT NEXT VALUE FOR keys, val FROM " + 
tableName);
+HBaseAdmin admin =
+driver.getConnectionQueryServices(getUrl(), 
TestUtil.TEST_PROPERTIES).getAdmin();
+for (int i=0; i<12; i++) {
+admin.split(TableName.valueOf(tableName));
+int upsertCount = stmt.executeUpdate();
 assertEquals((int)Math.pow(2, i), upsertCount);
 }
+admin.close();
 conn.close();
 }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1acd5fc2/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index c3cfa10..fb1169d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -76,12 +76,14 @@ import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.parse.SequenceValueParseNode;
 import org.apache.phoenix.parse.UpsertStatement;
 import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.ColumnRef;
 import org.apache.phoenix.schema.ConstraintViolationException;
 import 

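The iterator changes are truncated above, but the failure mode PHOENIX-4849 addresses is worth spelling out: when a region splits mid-scan and the scan is retried, it must resume from just after the last row already returned, not from the original start key, or rows get replayed or lost. A rough sketch of that resume step against the HBase 1.x client API (the helper and its name are illustrative, not the actual TableResultIterator code):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    final class ScanResumeSketch {
        // Sketch: build the retry scan from the original one, starting at the
        // immediate successor of the last row key that was actually returned.
        static Scan resumeAfter(Scan original, byte[] lastRowSeen) throws IOException {
            Scan retry = new Scan(original); // copies filters, caching, etc.
            if (lastRowSeen != null) {
                // appending 0x00 yields the smallest row key greater than lastRowSeen
                retry.setStartRow(Bytes.add(lastRowSeen, new byte[] { 0 }));
            }
            return retry;
        }
    }
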
[28/32] phoenix git commit: PHOENIX-4933 DELETE FROM throws NPE when a local index is present.

2018-10-17 Thread pboado
PHOENIX-4933 DELETE FROM throws NPE when a local index is present.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/81d679e3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/81d679e3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/81d679e3

Branch: refs/heads/4.14-cdh5.13
Commit: 81d679e38c47e2944a9b2fe4296fd8456a898ea6
Parents: a606065
Author: Lars Hofhansl 
Authored: Mon Oct 1 19:57:44 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:38:39 2018 +0100

--
 .../phoenix/end2end/index/LocalIndexIT.java | 22 +
 .../tuple/EncodedColumnQualiferCellsList.java   | 25 +---
 2 files changed, 39 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/81d679e3/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 0dcf1d5..796d5a2 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -78,7 +78,29 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 public LocalIndexIT(boolean isNamespaceMapped) {
 super(isNamespaceMapped);
 }
+
+@Test
+public void testDeleteFromLocalIndex() throws Exception {
+String tableName = schemaName + "." + generateUniqueName();
+String indexName = "IDX_" + generateUniqueName();
 
+Connection conn = getConnection();
+conn.setAutoCommit(true);
+if (isNamespaceMapped) {
+conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + 
schemaName);
+}
+
+conn.createStatement().execute("CREATE TABLE " + tableName + " (pk 
INTEGER PRIMARY KEY, v1 FLOAT, v2 FLOAT)");
+conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " 
ON " + tableName + "(v2)");
+conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES(1, rand(), rand())");
+// This would fail with an NPE before PHOENIX-4933
+conn.createStatement().execute("DELETE FROM " + tableName + " WHERE v1 
< 1");
+ResultSet rs = conn.createStatement().executeQuery("SELECT COUNT(*) 
FROM "+tableName);
+rs.next();
+assertEquals(0, rs.getInt(1));
+rs.close();
+}
+
 @Test
 public void testLocalIndexRoundTrip() throws Exception {
 String tableName = schemaName + "." + generateUniqueName();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/81d679e3/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java
index 10329fb..db3647d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java
@@ -175,14 +175,7 @@ public class EncodedColumnQualiferCellsList implements List<Cell> {
 firstNonNullElementIdx = -1;
 } else if (firstNonNullElementIdx == i) {
 // the element being removed was the first non-null 
element we knew
-while (i < array.length && (array[i]) == null) {
-i++;
-}
-if (i < array.length) {
-firstNonNullElementIdx = i;
-} else {
-firstNonNullElementIdx = -1;
-}
+adjustFirstNonNullElement();
 }
 modCount++;
 return true;
@@ -383,6 +376,18 @@ public class EncodedColumnQualiferCellsList implements List<Cell> {
 return getCellForColumnQualifier(columnQualifier);
 }
 
+private void adjustFirstNonNullElement() {
+int i = firstNonNullElementIdx;
+while (i < array.length && (array[i]) == null) {
+i++;
+}
+if (i < array.length) {
+firstNonNullElementIdx = i;
+} else {
+firstNonNullElementIdx = -1;
+}
+
+}
 private Cell getCellForColumnQualifier(int columnQualifier) {
 checkQualifierRange(columnQualifier);
 int idx = getArrayIndex(columnQualifier);
@@ -461,6 +466,10 @@ public class 

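The NPE arose in the remove() bookkeeping that the refactor above extracts into adjustFirstNonNullElement(). Reduced to a plain array, the invariant it maintains looks like this (a simplified sketch, not the actual Cell-backed implementation):

    final class FirstNonNullSketch {
        // Sketch: after the element at firstNonNullIdx is removed, scan
        // forward for the next non-null slot; -1 means the list is now empty.
        static int adjustFirstNonNull(Object[] array, int firstNonNullIdx) {
            int i = firstNonNullIdx;
            while (i < array.length && array[i] == null) {
                i++;
            }
            return i < array.length ? i : -1;
        }
    }
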
[12/32] phoenix git commit: PHOENIX-3383 Comparison between descending row keys used in RVC is reverse

2018-10-17 Thread pboado
http://git-wip-us.apache.org/repos/asf/phoenix/blob/dc383eb6/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index 87f00e4..a5287cb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -17,8 +17,6 @@
  */
 package org.apache.phoenix.compile;
 
-import static java.util.Collections.singletonList;
-
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -27,10 +25,14 @@ import java.util.Comparator;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.expression.AndExpression;
 import org.apache.phoenix.expression.BaseExpression;
 import 
org.apache.phoenix.expression.BaseExpression.ExpressionComparabilityWrapper;
@@ -61,7 +63,6 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.schema.ValueSchema.Field;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PDataType;
@@ -74,8 +75,11 @@ import org.apache.phoenix.util.SchemaUtil;
 
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
+import edu.umd.cs.findbugs.annotations.NonNull;
+
 
 /**
  *
@@ -115,6 +119,7 @@ public class WhereOptimizer {
RowKeySchema schema = table.getRowKeySchema();
boolean isMultiTenant = tenantId != null && table.isMultiTenant();
boolean isSharedIndex = table.getViewIndexId() != null;
+   ImmutableBytesWritable ptr = context.getTempPtr();

if (isMultiTenant) {
 tenantIdBytes = ScanUtil.getTenantIdBytes(schema, isSalted, 
tenantId, isSharedIndex);
@@ -158,31 +163,13 @@ public class WhereOptimizer {
 
 int pkPos = 0;
 int nPKColumns = table.getPKColumns().size();
-int[] slotSpan = new int[nPKColumns];
+int[] slotSpanArray = new int[nPKColumns];
List<List<KeyRange>> cnf = Lists.newArrayListWithExpectedSize(schema.getMaxFields());
-KeyRange minMaxRange = keySlots.getMinMaxRange();
-if (minMaxRange == null) {
-minMaxRange = KeyRange.EVERYTHING_RANGE;
-}
-boolean hasMinMaxRange = (minMaxRange != KeyRange.EVERYTHING_RANGE);
-int minMaxRangeOffset = 0;
-byte[] minMaxRangePrefix = null;
 boolean hasViewIndex = table.getViewIndexId() != null;
-if (hasMinMaxRange) {
-int minMaxRangeSize = (isSalted ? SaltingUtil.NUM_SALTING_BYTES : 
0)
-+ (isMultiTenant ? tenantIdBytes.length + 1 : 0)
-+ (hasViewIndex ? 
MetaDataUtil.getViewIndexIdDataType().getByteSize() : 0);
-minMaxRangePrefix = new byte[minMaxRangeSize];
-}
-
-Iterator iterator = keySlots.iterator();
+Iterator iterator = 
keySlots.getSlots().iterator();
 // Add placeholder for salt byte ranges
 if (isSalted) {
 cnf.add(SALT_PLACEHOLDER);
-if (hasMinMaxRange) {
-   System.arraycopy(SALT_PLACEHOLDER.get(0).getLowerRange(), 
0, minMaxRangePrefix, minMaxRangeOffset, SaltingUtil.NUM_SALTING_BYTES);
-   minMaxRangeOffset += SaltingUtil.NUM_SALTING_BYTES;
-}
 // Increment the pkPos, as the salt column is in the row schema
 // Do not increment the iterator, though, as there will never be
 // an expression in the keySlots for the salt column
@@ -194,35 +181,17 @@ public class WhereOptimizer {
 if (hasViewIndex) {
 byte[] viewIndexBytes = 
MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
 KeyRange indexIdKeyRange = KeyRange.getKeyRange(viewIndexBytes);
-cnf.add(singletonList(indexIdKeyRange));
-if (hasMinMaxRange) {
-System.arraycopy(viewIndexBytes, 0, minMaxRangePrefix, 
minMaxRangeOffset, viewIndexBytes.length);
-minMaxRangeOffset += viewIndexBytes.length;
-}
+cnf.add(Collections.singletonList(indexIdKeyRange));
 pkPos++;
 }
 
 // Add tenant data isolation for 

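For background on why the RVC comparison needed fixing: Phoenix stores a DESC primary key column with every byte complemented, so the stored byte order is the reverse of the value order, and range comparisons translated onto those bytes must flip direction. A small sketch of the inversion (illustrative; the real logic lives in SortOrder and the WhereOptimizer changes above):

    final class DescInversionSketch {
        // Sketch: byte-wise complement, the encoding Phoenix uses for DESC
        // columns; in unsigned byte order, v1 < v2 implies invert(v1) > invert(v2),
        // which is exactly the reversal this commit compensates for.
        static byte[] invert(byte[] value) {
            byte[] inverted = new byte[value.length];
            for (int i = 0; i < value.length; i++) {
                inverted[i] = (byte) ~value[i]; // ~b == 0xFF - b for unsigned bytes
            }
            return inverted;
        }
    }
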
[15/32] phoenix git commit: PHOENIX-4809 Only cache PhoenixConnections when lease renewal is on

2018-10-17 Thread pboado
PHOENIX-4809 Only cache PhoenixConnections when lease renewal is on

Lease renewal is the only mechanism under which connections are removed
from the connectionQueue. Calling close() on a connection doesn't proactively
remove it from the instance of ConnectionQueryServicesImpl.

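Since the renewal task is the only consumer of these queues, the fix is to enqueue a connection only when renewal is enabled. A minimal sketch of that guard (field and method names are approximations of ConnectionQueryServicesImpl, not the actual code):

    import java.lang.ref.WeakReference;
    import java.util.List;
    import java.util.concurrent.LinkedBlockingQueue;

    final class ConnectionCachingSketch<C> {
        private final boolean renewLeaseEnabled; // assumed config flag
        private final List<LinkedBlockingQueue<WeakReference<C>>> connectionQueues;

        ConnectionCachingSketch(boolean renewLeaseEnabled,
                List<LinkedBlockingQueue<WeakReference<C>>> connectionQueues) {
            this.renewLeaseEnabled = renewLeaseEnabled;
            this.connectionQueues = connectionQueues;
        }

        void addConnection(C connection) {
            // Without this guard, close() never removes the entry and the
            // queue grows without bound when lease renewal is off.
            if (renewLeaseEnabled) {
                int idx = Math.floorMod(connection.hashCode(), connectionQueues.size());
                connectionQueues.get(idx).add(new WeakReference<>(connection));
            }
        }
    }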

Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/656df7e3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/656df7e3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/656df7e3

Branch: refs/heads/4.14-cdh5.13
Commit: 656df7e370614d46e28f7fbb5f4d2877ba4642b4
Parents: abba660
Author: Josh Elser 
Authored: Wed Jul 11 22:02:46 2018 +0100
Committer: Pedro Boado 
Committed: Wed Oct 17 20:35:47 2018 +0100

--
 .../phoenix/query/ConnectionCachingIT.java  | 87 
 .../query/ConnectionQueryServicesImpl.java  | 11 ++-
 2 files changed, 97 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/656df7e3/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
new file mode 100644
index 000..b2ef052
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/query/ConnectionCachingIT.java
@@ -0,0 +1,87 @@
+package org.apache.phoenix.query;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.ref.WeakReference;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.Arrays;
+import java.util.Properties;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.phoenix.end2end.ParallelStatsEnabledIT;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.ConnectionQueryServicesImpl;
+import org.apache.phoenix.query.DelegateConnectionQueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@RunWith(Parameterized.class)
+public class ConnectionCachingIT extends ParallelStatsEnabledIT {
+  private static final Logger LOG = 
LoggerFactory.getLogger(ConnectionCachingIT.class);
+
+  @Parameters(name= "phoenix.scanner.lease.renew.enabled={0}")
+  public static Iterable<String> data() {
+return Arrays.asList("true", "false");
+  }
+
+  private String leaseRenewal;
+
+  public ConnectionCachingIT(String leaseRenewalValue) {
+this.leaseRenewal = leaseRenewalValue;
+  }
+
+  @Test
+  public void test() throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+props.put("phoenix.scanner.lease.renew.enabled", leaseRenewal);
+
+// The test driver works correctly, the real one doesn't.
+String url = getUrl();
+url = url.replace(";" + PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM, "");
+LOG.info("URL to use is: {}", url);
+
+Connection conn = DriverManager.getConnection(url, props);
+long before = getNumCachedConnections(conn);
+for (int i = 0; i < 10_000; i++) {
+  Connection c = DriverManager.getConnection(url, props);
+  c.close();
+}
+
Thread.sleep(QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS
 / 2);
+long after = getNumCachedConnections(conn);
+for (int i = 0; i < 6; i++) {
+  LOG.info("Found {} connections cached", after);
+  if (after <= before) {
+break;
+  }
+  
Thread.sleep(QueryServicesOptions.DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS
 / 2);
+  after = getNumCachedConnections(conn);
+}
+assertTrue("Saw " + before + " connections, but ended with " + after, 
after <= before);
+  }
+
+  long getNumCachedConnections(Connection conn) throws Exception {
+PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
+ConnectionQueryServices cqs = pConn.getQueryServices();
+// For whatever reason, we sometimes get a delegate here, and sometimes 
the real thing.
+if (cqs instanceof DelegateConnectionQueryServices) {
+  cqs = ((DelegateConnectionQueryServices) cqs).getDelegate();
+}
+assertTrue("ConnectionQueryServices was a " + cqs.getClass(), cqs 
instanceof ConnectionQueryServicesImpl);
+ConnectionQueryServicesImpl cqsi = (ConnectionQueryServicesImpl) cqs;
+long cachedConnections = 0L;
+for 
