[phoenix] annotated tag 4.14.2-HBase-1.3-rc0 created (now b43bcd3)

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to annotated tag 4.14.2-HBase-1.3-rc0
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at b43bcd3  (tag)
 tagging b9842b6a8f1b94ca148e2f657a5d16da6cb43a41 (commit)
 replaces v4.14.1-HBase-1.3
  by Thomas D'Silva
  on Sun Apr 21 00:25:39 2019 -0700

- Log -----------------------------------------------------------------
4.14.2-HBase-1.3-rc0
-----------------------------------------------------------------------

No new revisions were added by this update.



[phoenix] branch 4.14-HBase-1.3 updated: Set version to 4.14.2-HBase-1.3

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new b9842b6  Set version to 4.14.2-HBase-1.3
b9842b6 is described below

commit b9842b6a8f1b94ca148e2f657a5d16da6cb43a41
Author: Thomas D'Silva 
AuthorDate: Sun Apr 21 00:21:14 2019 -0700

Set version to 4.14.2-HBase-1.3
---
 phoenix-assembly/pom.xml   | 2 +-
 phoenix-client/pom.xml | 2 +-
 phoenix-core/pom.xml   | 2 +-
 phoenix-flume/pom.xml  | 2 +-
 phoenix-hive/pom.xml   | 2 +-
 phoenix-kafka/pom.xml  | 2 +-
 phoenix-load-balancer/pom.xml  | 2 +-
 phoenix-pherf/pom.xml  | 2 +-
 phoenix-pig/pom.xml| 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml| 2 +-
 phoenix-server/pom.xml | 2 +-
 phoenix-spark/pom.xml  | 2 +-
 phoenix-tracing-webapp/pom.xml | 2 +-
 pom.xml| 2 +-
 15 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 08735c4..7f7d78f 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 71452bd..d0ec982 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-client</artifactId>
   <name>Phoenix Client</name>
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 291abec..2734056 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-core</artifactId>
   <name>Phoenix Core</name>
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index cdd4627..dc62381 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index 341dc4e..2162e8c 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-hive</artifactId>
   <name>Phoenix - Hive</name>
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 9dd0c0e..da89a8a 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-kafka</artifactId>
   <name>Phoenix - Kafka</name>
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index b91244d..df7a50b 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-load-balancer</artifactId>
   <name>Phoenix Load Balancer</name>
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 74d9e89..f5d570f 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
 
   <artifactId>phoenix-pherf</artifactId>
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 52a03cf..80901ba 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-pig</artifactId>
   <name>Phoenix - Pig</name>
diff --git a/phoenix-queryserver-client/pom.xml b/phoenix-queryserver-client/pom.xml
index d71556b..07ec210 100644
--- a/phoenix-queryserver-client/pom.xml
+++ b/phoenix-queryserver-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-queryserver-client</artifactId>
   <name>Phoenix Query Server Client</name>
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index 1278928..2e3d567 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-queryserver</artifactId>
   <name>Phoenix Query Server</name>
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 6b584bb..3263940 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.3</version>
+    <version>4.14.2-HBase-1.3</version>
   </parent>
   <artifactId>phoenix-server</artifactId>
   <name>Phoenix Server</name>
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index af8f034..f3c1552 100644

[phoenix] annotated tag 4.14.2-HBase-1.2-rc0 created (now 3d0ee6d)

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to annotated tag 4.14.2-HBase-1.2-rc0
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


  at 3d0ee6d  (tag)
 tagging 2a61e639ebc4f373cd9dc3b17e628fd2e3f14c4e (commit)
 replaces v4.14.1-HBase-1.2
  by Thomas D'Silva
  on Sun Apr 21 00:13:29 2019 -0700

- Log -----------------------------------------------------------------
4.14.2-HBase-1.2-rc0
-----------------------------------------------------------------------

No new revisions were added by this update.



[phoenix] branch 4.14-HBase-1.2 updated: Set version to 4.14.2-HBase-1.2

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.2 by this push:
 new 2a61e63  Set version to 4.14.2-HBase-1.2
2a61e63 is described below

commit 2a61e639ebc4f373cd9dc3b17e628fd2e3f14c4e
Author: Thomas D'Silva 
AuthorDate: Sun Apr 21 00:09:07 2019 -0700

Set version to 4.14.2-HBase-1.2
---
 phoenix-assembly/pom.xml   | 2 +-
 phoenix-client/pom.xml | 2 +-
 phoenix-core/pom.xml   | 2 +-
 phoenix-flume/pom.xml  | 2 +-
 phoenix-hive/pom.xml   | 2 +-
 phoenix-kafka/pom.xml  | 2 +-
 phoenix-load-balancer/pom.xml  | 2 +-
 phoenix-pherf/pom.xml  | 2 +-
 phoenix-pig/pom.xml| 2 +-
 phoenix-queryserver-client/pom.xml | 2 +-
 phoenix-queryserver/pom.xml| 2 +-
 phoenix-server/pom.xml | 2 +-
 phoenix-spark/pom.xml  | 2 +-
 phoenix-tracing-webapp/pom.xml | 2 +-
 pom.xml| 2 +-
 15 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index d8ddf4a..8dff5eb 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-assembly</artifactId>
   <name>Phoenix Assembly</name>
diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 778a5a9..8650f27 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-client</artifactId>
   <name>Phoenix Client</name>
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 2543e36..6a49017 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-core</artifactId>
   <name>Phoenix Core</name>
diff --git a/phoenix-flume/pom.xml b/phoenix-flume/pom.xml
index 0fe653d..f5c6b7e 100644
--- a/phoenix-flume/pom.xml
+++ b/phoenix-flume/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-flume</artifactId>
   <name>Phoenix - Flume</name>
diff --git a/phoenix-hive/pom.xml b/phoenix-hive/pom.xml
index cbf371a..c14016b 100644
--- a/phoenix-hive/pom.xml
+++ b/phoenix-hive/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-hive</artifactId>
   <name>Phoenix - Hive</name>
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 7418c8b..4441ea3 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-kafka</artifactId>
   <name>Phoenix - Kafka</name>
diff --git a/phoenix-load-balancer/pom.xml b/phoenix-load-balancer/pom.xml
index 828e42d..b3edab9 100644
--- a/phoenix-load-balancer/pom.xml
+++ b/phoenix-load-balancer/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-load-balancer</artifactId>
   <name>Phoenix Load Balancer</name>
diff --git a/phoenix-pherf/pom.xml b/phoenix-pherf/pom.xml
index 92e08e9..7c430c0 100644
--- a/phoenix-pherf/pom.xml
+++ b/phoenix-pherf/pom.xml
@@ -15,7 +15,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
 
   <artifactId>phoenix-pherf</artifactId>
diff --git a/phoenix-pig/pom.xml b/phoenix-pig/pom.xml
index 8d7ff1e..1178146 100644
--- a/phoenix-pig/pom.xml
+++ b/phoenix-pig/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-pig</artifactId>
   <name>Phoenix - Pig</name>
diff --git a/phoenix-queryserver-client/pom.xml b/phoenix-queryserver-client/pom.xml
index 36896c5..f38ebea 100644
--- a/phoenix-queryserver-client/pom.xml
+++ b/phoenix-queryserver-client/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-queryserver-client</artifactId>
   <name>Phoenix Query Server Client</name>
diff --git a/phoenix-queryserver/pom.xml b/phoenix-queryserver/pom.xml
index 6ddd520..57db845 100644
--- a/phoenix-queryserver/pom.xml
+++ b/phoenix-queryserver/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-queryserver</artifactId>
   <name>Phoenix Query Server</name>
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index c8220e6..10da012 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -27,7 +27,7 @@
   <parent>
     <groupId>org.apache.phoenix</groupId>
     <artifactId>phoenix</artifactId>
-    <version>4.14.1-HBase-1.2</version>
+    <version>4.14.2-HBase-1.2</version>
   </parent>
   <artifactId>phoenix-server</artifactId>
   <name>Phoenix Server</name>
diff --git a/phoenix-spark/pom.xml b/phoenix-spark/pom.xml
index 7aeef32..dafb7a7 100644

[phoenix] branch 4.14-HBase-1.4 updated: Add missing license

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 00a4c81  Add missing license
00a4c81 is described below

commit 00a4c8174332a879689617a57f8bc841af91b4d0
Author: Thomas D'Silva 
AuthorDate: Sun Apr 21 00:04:20 2019 -0700

Add missing license
---
 .../server/AvaticaServerConfigurationFactory.java   | 17 +
 .../server/CustomAvaticaServerConfigurationTest.java| 17 +
 2 files changed, 34 insertions(+)

diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
index 87a72ea..33fd590 100644
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
+++ b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.queryserver.server;
 
 import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
index 20bc868..fb59e0d 100644
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
+++ b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.queryserver.server;
 
 import org.apache.calcite.avatica.server.AvaticaServerConfiguration;



[phoenix] branch 4.14-HBase-1.3 updated: Add missing license

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new bf076d5  Add missing license
bf076d5 is described below

commit bf076d59fd254b1072cc7c8cbbebb8def66f7e31
Author: Thomas D'Silva 
AuthorDate: Sun Apr 21 00:04:20 2019 -0700

Add missing license
---
 .../server/AvaticaServerConfigurationFactory.java   | 17 +
 .../server/CustomAvaticaServerConfigurationTest.java| 17 +
 2 files changed, 34 insertions(+)

diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
index 87a72ea..33fd590 100644
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
+++ b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.queryserver.server;
 
 import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
index 20bc868..fb59e0d 100644
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
+++ b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.queryserver.server;
 
 import org.apache.calcite.avatica.server.AvaticaServerConfiguration;



[phoenix] branch 4.14-HBase-1.2 updated: Add missing license

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.2 by this push:
 new ecfe7c0  Add missing license
ecfe7c0 is described below

commit ecfe7c04c64fd6aae8c9931a8b00ba726e44c82e
Author: Thomas D'Silva 
AuthorDate: Sun Apr 21 00:04:20 2019 -0700

Add missing license
---
 .../server/AvaticaServerConfigurationFactory.java   | 17 +
 .../server/CustomAvaticaServerConfigurationTest.java| 17 +
 2 files changed, 34 insertions(+)

diff --git a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
index 87a72ea..33fd590 100644
--- a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
+++ b/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerConfigurationFactory.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.queryserver.server;
 
 import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
diff --git a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
index 20bc868..fb59e0d 100644
--- a/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
+++ b/phoenix-queryserver/src/test/java/org/apache/phoenix/queryserver/server/CustomAvaticaServerConfigurationTest.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.queryserver.server;
 
 import org.apache.calcite.avatica.server.AvaticaServerConfiguration;



[phoenix] branch 4.14-HBase-1.3 updated (563c8f9 -> 05fbe72)

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 563c8f9  PhoenixResultSet#next() closes the result set if scanner returns null
 new 5e5ecde  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
 new 05fbe72  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE (Addendum)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../phoenix/iterate/ScanningResultIterator.java| 69 ++
 1 file changed, 43 insertions(+), 26 deletions(-)



[phoenix] 01/02: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 5e5ecdedeee518269e147a0c1dd04e65c431e609
Author: Karan Mehta 
AuthorDate: Wed Apr 3 11:34:26 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../phoenix/iterate/ScanningResultIterator.java| 83 --
 1 file changed, 47 insertions(+), 36 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 9a31238..9a7384b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -17,6 +17,17 @@
  */
 package org.apache.phoenix.iterate;
 
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_REMOTE_RESULTS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_RESULTS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.MILLIS_BETWEEN_NEXTS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.NOT_SERVING_REGION_EXCEPTION_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REGIONS_SCANNED_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_CALLS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_RETRIES_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_CALLS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_RETRIES_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -39,6 +50,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.phoenix.monitoring.CombinableMetric;
+import org.apache.phoenix.monitoring.GlobalClientMetrics;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
@@ -46,29 +58,12 @@ import org.apache.phoenix.util.ServerUtil;
 
 public class ScanningResultIterator implements ResultIterator {
     private final ResultScanner scanner;
-    private final Scan scan;
     private final ScanMetricsHolder scanMetricsHolder;
     boolean scanMetricsUpdated;
     boolean scanMetricsEnabled;
 
-    // These metric names are how HBase refers them
-    // Since HBase stores these strings as static final, we are using the same here
-    static final String RPC_CALLS_METRIC_NAME = "RPC_CALLS";
-    static final String REMOTE_RPC_CALLS_METRIC_NAME = "REMOTE_RPC_CALLS";
-    static final String MILLIS_BETWEEN_NEXTS_METRIC_NAME = "MILLIS_BETWEEN_NEXTS";
-    static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = "NOT_SERVING_REGION_EXCEPTION";
-    static final String BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS";
-    static final String BYTES_IN_REMOTE_RESULTS_METRIC_NAME = "BYTES_IN_REMOTE_RESULTS";
-    static final String REGIONS_SCANNED_METRIC_NAME = "REGIONS_SCANNED";
-    static final String RPC_RETRIES_METRIC_NAME = "RPC_RETRIES";
-    static final String REMOTE_RPC_RETRIES_METRIC_NAME = "REMOTE_RPC_RETRIES";
-    static final String COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME = "ROWS_SCANNED";
-    static final String COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME = "ROWS_FILTERED";
-    static final String GLOBAL_BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS";
-
     public ScanningResultIterator(ResultScanner scanner, Scan scan, ScanMetricsHolder scanMetricsHolder) {
         this.scanner = scanner;
-        this.scan = scan;
         this.scanMetricsHolder = scanMetricsHolder;
         scanMetricsUpdated = false;
         scanMetricsEnabled = scan.isScanMetricsEnabled();
@@ -80,20 +75,25 @@ public class ScanningResultIterator implements ResultIterator {
         scanner.close();
     }
 
-    private static void changeMetric(CombinableMetric metric, Long value) {
+    private void changeMetric(CombinableMetric metric, Long value) {
         if(value != null) {
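The substance of the change above is a null guard around metric updates: HBase can omit individual scan metrics, so each value is checked before the counter is touched, which is what removes the NPE. A minimal, self-contained Java sketch of that pattern (plain collections stand in for Phoenix's CombinableMetric; every name below is illustrative, not Phoenix source):

import java.util.HashMap;
import java.util.Map;

public class NullSafeMetricUpdate {
    private final Map<String, Long> totals = new HashMap<>();

    // Mirrors the changeMetric() in the diff: skip the update when HBase
    // reported no value for this metric, instead of dereferencing null.
    void changeMetric(String name, Long value) {
        if (value != null) {
            totals.merge(name, value, Long::sum);
        }
    }

    public static void main(String[] args) {
        NullSafeMetricUpdate metrics = new NullSafeMetricUpdate();
        Map<String, Long> scanMetricsMap = new HashMap<>();
        scanMetricsMap.put("RPC_CALLS", 3L);   // metric that was reported
        // "ROWS_SCANNED" was never reported, so get() returns null below.
        metrics.changeMetric("RPC_CALLS", scanMetricsMap.get("RPC_CALLS"));
        metrics.changeMetric("ROWS_SCANNED", scanMetricsMap.get("ROWS_SCANNED"));
        System.out.println(metrics.totals);    // prints {RPC_CALLS=3}
    }
}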

[phoenix] 02/02: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE (Addendum)

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit f80abb0cf342b099b686ae26426074b305b6f041
Author: Karan Mehta 
AuthorDate: Wed Apr 3 15:12:19 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE (Addendum)
---
 .../phoenix/iterate/ScanningResultIterator.java| 40 +-
 1 file changed, 23 insertions(+), 17 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 9a7384b..8a1fe5a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -17,17 +17,8 @@
  */
 package org.apache.phoenix.iterate;
 
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_REMOTE_RESULTS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_RESULTS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.MILLIS_BETWEEN_NEXTS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.NOT_SERVING_REGION_EXCEPTION_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REGIONS_SCANNED_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_CALLS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_RETRIES_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_CALLS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_RETRIES_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -49,6 +40,7 @@ import java.util.Map;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.phoenix.monitoring.CombinableMetric;
 import org.apache.phoenix.monitoring.GlobalClientMetrics;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
@@ -58,12 +50,26 @@ import org.apache.phoenix.util.ServerUtil;
 
 public class ScanningResultIterator implements ResultIterator {
     private final ResultScanner scanner;
+    private final Scan scan;
     private final ScanMetricsHolder scanMetricsHolder;
     boolean scanMetricsUpdated;
     boolean scanMetricsEnabled;
 
+    // These metric names are how HBase refers them
+    // Since HBase stores these strings as static final, we are using the same here
+    static final String RPC_CALLS_METRIC_NAME = "RPC_CALLS";
+    static final String REMOTE_RPC_CALLS_METRIC_NAME = "REMOTE_RPC_CALLS";
+    static final String MILLIS_BETWEEN_NEXTS_METRIC_NAME = "MILLIS_BETWEEN_NEXTS";
+    static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = "NOT_SERVING_REGION_EXCEPTION";
+    static final String BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS";
+    static final String BYTES_IN_REMOTE_RESULTS_METRIC_NAME = "BYTES_IN_REMOTE_RESULTS";
+    static final String REGIONS_SCANNED_METRIC_NAME = "REGIONS_SCANNED";
+    static final String RPC_RETRIES_METRIC_NAME = "RPC_RETRIES";
+    static final String REMOTE_RPC_RETRIES_METRIC_NAME = "REMOTE_RPC_RETRIES";
+
     public ScanningResultIterator(ResultScanner scanner, Scan scan, ScanMetricsHolder scanMetricsHolder) {
         this.scanner = scanner;
+        this.scan = scan;
         this.scanMetricsHolder = scanMetricsHolder;
         scanMetricsUpdated = false;
         scanMetricsEnabled = scan.isScanMetricsEnabled();
@@ -90,7 +96,7 @@ public class ScanningResultIterator implements ResultIterator {
     private void getScanMetrics() {
 
         if (scanMetricsEnabled && !scanMetricsUpdated) {
-            ScanMetrics scanMetrics = scanner.getScanMetrics();
+            ScanMetrics scanMetrics = scan.getScanMetrics();
             Map<String, Long> scanMetricsMap = scanMetrics.getMetricsMap(
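The key line in the addendum above is the switch from scanner.getScanMetrics() back to scan.getScanMetrics(): these 4.14 branches build against HBase 1.x, where scan metrics travel with the Scan object (ResultScanner#getScanMetrics() is a later HBase API). A hedged sketch of that read path, assuming an HBase 1.x client on the classpath; metricsFor() is an illustrative helper, not Phoenix code:

import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

public class ReadScanMetrics {
    // Reads metrics the HBase 1.x way, as the addendum does: from the Scan
    // handed to the scanner, after enabling collection up front.
    static Map<String, Long> metricsFor(Scan scan) {
        ScanMetrics metrics = scan.getScanMetrics(); // null until a metrics-enabled scan has run
        return metrics == null ? Collections.<String, Long>emptyMap() : metrics.getMetricsMap();
    }

    public static void main(String[] args) {
        Scan scan = new Scan();
        scan.setScanMetricsEnabled(true); // must be set before the scanner is opened
        System.out.println(metricsFor(scan)); // {} here; populated after iterating a real scanner
    }
}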

[phoenix] 01/02: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 22501c514e822a0ac187cac3d89f3c442c31bfd4
Author: Karan Mehta 
AuthorDate: Wed Apr 3 11:34:26 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../phoenix/iterate/ScanningResultIterator.java| 83 --
 1 file changed, 47 insertions(+), 36 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 9a31238..9a7384b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -17,6 +17,17 @@
  */
 package org.apache.phoenix.iterate;
 
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_REMOTE_RESULTS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_RESULTS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.MILLIS_BETWEEN_NEXTS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.NOT_SERVING_REGION_EXCEPTION_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REGIONS_SCANNED_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_CALLS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_RETRIES_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_CALLS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_RETRIES_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -39,6 +50,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.phoenix.monitoring.CombinableMetric;
+import org.apache.phoenix.monitoring.GlobalClientMetrics;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
@@ -46,29 +58,12 @@ import org.apache.phoenix.util.ServerUtil;
 
 public class ScanningResultIterator implements ResultIterator {
     private final ResultScanner scanner;
-    private final Scan scan;
     private final ScanMetricsHolder scanMetricsHolder;
     boolean scanMetricsUpdated;
     boolean scanMetricsEnabled;
 
-    // These metric names are how HBase refers them
-    // Since HBase stores these strings as static final, we are using the same here
-    static final String RPC_CALLS_METRIC_NAME = "RPC_CALLS";
-    static final String REMOTE_RPC_CALLS_METRIC_NAME = "REMOTE_RPC_CALLS";
-    static final String MILLIS_BETWEEN_NEXTS_METRIC_NAME = "MILLIS_BETWEEN_NEXTS";
-    static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = "NOT_SERVING_REGION_EXCEPTION";
-    static final String BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS";
-    static final String BYTES_IN_REMOTE_RESULTS_METRIC_NAME = "BYTES_IN_REMOTE_RESULTS";
-    static final String REGIONS_SCANNED_METRIC_NAME = "REGIONS_SCANNED";
-    static final String RPC_RETRIES_METRIC_NAME = "RPC_RETRIES";
-    static final String REMOTE_RPC_RETRIES_METRIC_NAME = "REMOTE_RPC_RETRIES";
-    static final String COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME = "ROWS_SCANNED";
-    static final String COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME = "ROWS_FILTERED";
-    static final String GLOBAL_BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS";
-
     public ScanningResultIterator(ResultScanner scanner, Scan scan, ScanMetricsHolder scanMetricsHolder) {
         this.scanner = scanner;
-        this.scan = scan;
         this.scanMetricsHolder = scanMetricsHolder;
         scanMetricsUpdated = false;
         scanMetricsEnabled = scan.isScanMetricsEnabled();
@@ -80,20 +75,25 @@ public class ScanningResultIterator implements ResultIterator {
         scanner.close();
     }
 
-    private static void changeMetric(CombinableMetric metric, Long value) {
+    private void changeMetric(CombinableMetric metric, Long value) {
         if(value != null) {

[phoenix] 02/02: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE (Addendum)

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 05fbe721f746fac29ed084a7c0e3b41ccbdb4c35
Author: Karan Mehta 
AuthorDate: Wed Apr 3 15:12:19 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE (Addendum)
---
 .../phoenix/iterate/ScanningResultIterator.java| 40 +-
 1 file changed, 23 insertions(+), 17 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 9a7384b..8a1fe5a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -17,17 +17,8 @@
  */
 package org.apache.phoenix.iterate;
 
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_REMOTE_RESULTS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_RESULTS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.MILLIS_BETWEEN_NEXTS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.NOT_SERVING_REGION_EXCEPTION_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REGIONS_SCANNED_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_CALLS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_RETRIES_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_CALLS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_RETRIES_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -49,6 +40,7 @@ import java.util.Map;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.phoenix.monitoring.CombinableMetric;
 import org.apache.phoenix.monitoring.GlobalClientMetrics;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
@@ -58,12 +50,26 @@ import org.apache.phoenix.util.ServerUtil;
 
 public class ScanningResultIterator implements ResultIterator {
     private final ResultScanner scanner;
+    private final Scan scan;
     private final ScanMetricsHolder scanMetricsHolder;
     boolean scanMetricsUpdated;
     boolean scanMetricsEnabled;
 
+    // These metric names are how HBase refers them
+    // Since HBase stores these strings as static final, we are using the same here
+    static final String RPC_CALLS_METRIC_NAME = "RPC_CALLS";
+    static final String REMOTE_RPC_CALLS_METRIC_NAME = "REMOTE_RPC_CALLS";
+    static final String MILLIS_BETWEEN_NEXTS_METRIC_NAME = "MILLIS_BETWEEN_NEXTS";
+    static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = "NOT_SERVING_REGION_EXCEPTION";
+    static final String BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS";
+    static final String BYTES_IN_REMOTE_RESULTS_METRIC_NAME = "BYTES_IN_REMOTE_RESULTS";
+    static final String REGIONS_SCANNED_METRIC_NAME = "REGIONS_SCANNED";
+    static final String RPC_RETRIES_METRIC_NAME = "RPC_RETRIES";
+    static final String REMOTE_RPC_RETRIES_METRIC_NAME = "REMOTE_RPC_RETRIES";
+
     public ScanningResultIterator(ResultScanner scanner, Scan scan, ScanMetricsHolder scanMetricsHolder) {
         this.scanner = scanner;
+        this.scan = scan;
         this.scanMetricsHolder = scanMetricsHolder;
         scanMetricsUpdated = false;
         scanMetricsEnabled = scan.isScanMetricsEnabled();
@@ -90,7 +96,7 @@ public class ScanningResultIterator implements ResultIterator {
     private void getScanMetrics() {
 
         if (scanMetricsEnabled && !scanMetricsUpdated) {
-            ScanMetrics scanMetrics = scanner.getScanMetrics();
+            ScanMetrics scanMetrics = scan.getScanMetrics();
             Map<String, Long> scanMetricsMap = scanMetrics.getMetricsMap(

[phoenix] branch 4.14-HBase-1.4 updated (0d9c604 -> f80abb0)

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 0d9c604  PhoenixResultSet#next() closes the result set if scanner returns null
 new 22501c5  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
 new f80abb0  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE (Addendum)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../phoenix/iterate/ScanningResultIterator.java| 69 ++
 1 file changed, 43 insertions(+), 26 deletions(-)



[phoenix] branch 4.14-HBase-1.2 updated (6bb66eb -> 01ce1b5)

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 6bb66eb  PhoenixResultSet#next() closes the result set if scanner returns null
 new 492084b  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
 new 01ce1b5  PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE (Addendum)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../phoenix/iterate/ScanningResultIterator.java| 69 ++
 1 file changed, 43 insertions(+), 26 deletions(-)



[phoenix] 01/02: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 492084b9f67ecd54a84bfe39172c62efb49bf3c6
Author: Karan Mehta 
AuthorDate: Wed Apr 3 11:34:26 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE
---
 .../phoenix/iterate/ScanningResultIterator.java| 83 --
 1 file changed, 47 insertions(+), 36 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 9a31238..9a7384b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -17,6 +17,17 @@
  */
 package org.apache.phoenix.iterate;
 
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_REMOTE_RESULTS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_RESULTS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.MILLIS_BETWEEN_NEXTS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.NOT_SERVING_REGION_EXCEPTION_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REGIONS_SCANNED_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_CALLS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_RETRIES_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_CALLS_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_RETRIES_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -39,6 +50,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.phoenix.monitoring.CombinableMetric;
+import org.apache.phoenix.monitoring.GlobalClientMetrics;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
@@ -46,29 +58,12 @@ import org.apache.phoenix.util.ServerUtil;
 
 public class ScanningResultIterator implements ResultIterator {
     private final ResultScanner scanner;
-    private final Scan scan;
     private final ScanMetricsHolder scanMetricsHolder;
     boolean scanMetricsUpdated;
     boolean scanMetricsEnabled;
 
-    // These metric names are how HBase refers them
-    // Since HBase stores these strings as static final, we are using the same here
-    static final String RPC_CALLS_METRIC_NAME = "RPC_CALLS";
-    static final String REMOTE_RPC_CALLS_METRIC_NAME = "REMOTE_RPC_CALLS";
-    static final String MILLIS_BETWEEN_NEXTS_METRIC_NAME = "MILLIS_BETWEEN_NEXTS";
-    static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = "NOT_SERVING_REGION_EXCEPTION";
-    static final String BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS";
-    static final String BYTES_IN_REMOTE_RESULTS_METRIC_NAME = "BYTES_IN_REMOTE_RESULTS";
-    static final String REGIONS_SCANNED_METRIC_NAME = "REGIONS_SCANNED";
-    static final String RPC_RETRIES_METRIC_NAME = "RPC_RETRIES";
-    static final String REMOTE_RPC_RETRIES_METRIC_NAME = "REMOTE_RPC_RETRIES";
-    static final String COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME = "ROWS_SCANNED";
-    static final String COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME = "ROWS_FILTERED";
-    static final String GLOBAL_BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS";
-
     public ScanningResultIterator(ResultScanner scanner, Scan scan, ScanMetricsHolder scanMetricsHolder) {
         this.scanner = scanner;
-        this.scan = scan;
         this.scanMetricsHolder = scanMetricsHolder;
         scanMetricsUpdated = false;
         scanMetricsEnabled = scan.isScanMetricsEnabled();
@@ -80,20 +75,25 @@ public class ScanningResultIterator implements ResultIterator {
         scanner.close();
     }
 
-    private static void changeMetric(CombinableMetric metric, Long value) {
+    private void changeMetric(CombinableMetric metric, Long value) {
         if(value != null) {

[phoenix] 02/02: PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE (Addendum)

2019-04-21  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 01ce1b53322b9560d28f02740d624e20a35b61da
Author: Karan Mehta 
AuthorDate: Wed Apr 3 15:12:19 2019 -0700

PHOENIX-5101 ScanningResultIterator getScanMetrics throws NPE (Addendum)
---
 .../phoenix/iterate/ScanningResultIterator.java| 40 +-
 1 file changed, 23 insertions(+), 17 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 9a7384b..8a1fe5a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -17,17 +17,8 @@
  */
 package org.apache.phoenix.iterate;
 
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_REMOTE_RESULTS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.BYTES_IN_RESULTS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.MILLIS_BETWEEN_NEXTS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.NOT_SERVING_REGION_EXCEPTION_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REGIONS_SCANNED_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_CALLS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_RETRIES_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_CALLS_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_RETRIES_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME;
-import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -49,6 +40,7 @@ import java.util.Map;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.phoenix.monitoring.CombinableMetric;
 import org.apache.phoenix.monitoring.GlobalClientMetrics;
 import org.apache.phoenix.monitoring.ScanMetricsHolder;
@@ -58,12 +50,26 @@ import org.apache.phoenix.util.ServerUtil;
 
 public class ScanningResultIterator implements ResultIterator {
     private final ResultScanner scanner;
+    private final Scan scan;
     private final ScanMetricsHolder scanMetricsHolder;
     boolean scanMetricsUpdated;
     boolean scanMetricsEnabled;
 
+    // These metric names are how HBase refers them
+    // Since HBase stores these strings as static final, we are using the same here
+    static final String RPC_CALLS_METRIC_NAME = "RPC_CALLS";
+    static final String REMOTE_RPC_CALLS_METRIC_NAME = "REMOTE_RPC_CALLS";
+    static final String MILLIS_BETWEEN_NEXTS_METRIC_NAME = "MILLIS_BETWEEN_NEXTS";
+    static final String NOT_SERVING_REGION_EXCEPTION_METRIC_NAME = "NOT_SERVING_REGION_EXCEPTION";
+    static final String BYTES_IN_RESULTS_METRIC_NAME = "BYTES_IN_RESULTS";
+    static final String BYTES_IN_REMOTE_RESULTS_METRIC_NAME = "BYTES_IN_REMOTE_RESULTS";
+    static final String REGIONS_SCANNED_METRIC_NAME = "REGIONS_SCANNED";
+    static final String RPC_RETRIES_METRIC_NAME = "RPC_RETRIES";
+    static final String REMOTE_RPC_RETRIES_METRIC_NAME = "REMOTE_RPC_RETRIES";
+
     public ScanningResultIterator(ResultScanner scanner, Scan scan, ScanMetricsHolder scanMetricsHolder) {
         this.scanner = scanner;
+        this.scan = scan;
         this.scanMetricsHolder = scanMetricsHolder;
         scanMetricsUpdated = false;
         scanMetricsEnabled = scan.isScanMetricsEnabled();
@@ -90,7 +96,7 @@ public class ScanningResultIterator implements ResultIterator {
     private void getScanMetrics() {
 
         if (scanMetricsEnabled && !scanMetricsUpdated) {
-            ScanMetrics scanMetrics = scanner.getScanMetrics();
+            ScanMetrics scanMetrics = scan.getScanMetrics();
             Map<String, Long> scanMetricsMap = scanMetrics.getMetricsMap(

svn commit: r33695 - /dev/phoenix/KEYS

2019-04-19  tdsilva
Author: tdsilva
Date: Sat Apr 20 05:35:22 2019
New Revision: 33695

Log:
Add Thomas D'Silva (CODE SIGNING KEY)

Modified:
dev/phoenix/KEYS

Modified: dev/phoenix/KEYS
==============================================================================
--- dev/phoenix/KEYS (original)
+++ dev/phoenix/KEYS Sat Apr 20 05:35:22 2019
@@ -648,3 +648,61 @@ PIhgnAndAr32ZDjWXidjDcS4asR+9RDKJ+lVJ6Dg
 YJ+l5z2VdaPLR4VfFvty7PGe2H0=
 =gf+H
 -----END PGP PUBLIC KEY BLOCK-----
+pub   4096R/DFD86C02 2018-09-28
+uid  Thomas D'Silva 
+sig 3        DFD86C02 2018-09-28  Thomas D'Silva 
+sub   4096R/EFEBE302 2018-09-28
+sig  DFD86C02 2018-09-28  Thomas D'Silva 
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQINBFuueLwBEADE9BfyLawEA2NZS7belQHUuvv6W/+iH6qik1+Ng9r43Clx8Q2N
+LL9iZjoT9kiMLT7SFwPW/1FNfMWGnAyyCHQUN62hmmytgDX4S7HbR6hHyLx7H5wf
+FbmBykjAK/TJZ1be1pQmbCd/xxy5FBtQz8oVOC9NGtW5Pla7lI33R8Co3l7PfmG/
+FPGEJcmUeRhVsiL7JjOk26YVxupQNKacjH705mNm3n5CPQ7NkV71zRr5dPZdT0pS
+I/lHzYSdoJrqUMIbM5m7nFxHf4MYW7/rGAlcJE9puZOMk2TNbcIcaq2Qosqklttj
+NAsSTGMgJB4SvPUC/OQnS8hqu8PKnXerd23+dZLN+tuY6Gg/Otjuth5+u6q1MaP6
+uwDJfpEqFtM2pBZR/zuPNlfODBSarggXIGVD6Hor4iTZJ8NpXVSpdQsviRLYztQG
+BJHVwL0Yq4Ykg2GgMdep+DoVFimaiwU4i+gDhvpDRKb8wWIx5cRgjn6RfZXcW9Ha
+6+CxX6sa0Yf57+vcBZyRa20/mSocRNgAZ/umvgd9djHHxlRyXYLoOIQRlUGHhuJX
+V7oHbvi5Rw+EO0Fe190H0eEw0k7Qa92YhK1bgwQvIgJr7b0x2zACrsNl0cLyKnp4
+OCtgSj7mZbn0UiUDaxO8GZohZGXDT0lS8uy1vRFtWVvyGY0YZVUnxJm0BwARAQAB
+tCdUaG9tYXMgRCdTaWx2YSA8dGRzaWx2YUBzYWxlc2ZvcmNlLmNvbT6JAjkEEwEC
+ACMFAluueLwCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIXgAAKCRBpHGYc39hs
+AoZxEAC+c8omGqRHeHAmspEBugsVtjYNithkKcMEn3emwhk8VKYEHj+BMxw+ivuB
+EzideUQp2KF9FXgsKrH8L2F7ilKTHcrebs+eUPdBwBd1PJAh5zFivUIlM+LLHWTi
+0p2xJ7XXSCBmpXpHpBOrdYMtD9ifsy4rMF3s5F8yVpRHxivVyFpxN78sdbA2mWkW
+UofGeqrhXmHJErRhUADZmRyUvqX+MQVlAaVG3J6C40jFfYBWyj3bzhulAPg0mpRa
+xImLwvJa9/twFnYOctaaWoiLXFI6atyXIhNdiM+t6ZwezX2RlOJlQedQrp+fl10r
+dcgkbSYJQjT0HLxcbNsv8lMRpbQUAu4RsWw0XdG/yWFGdGGmns8pf8ueYqwmyl5D
++FEz/XdaRG2kYmYUIZZ/IrYP55tmcU7+M79LpGELmdDBQ9wEJRoKgpM0DLcxu71X
+5IGDI8KvS/MVKUFsG1uD41Z61aHj26ymEtZx7FbqVmUxgIypsDJNJ7iZht0MS9Tm
+OzuWmJSa/lO0AXJO4lTFiyHzMyZ8xQT5fwi07pG1EqOlF8w8/BnSuKA83HPrNGcB
+VhrnYtzGGeyc3aMzVjhB3ZTUFujI8y+Mx6aIf77OSyNVlDNrdq2YLerlZyg9y6ze
+OBeMw1p0kRdEvm1vFIw66RPniG658eIFfl4PRI6VsUx+e+QjxbkCDQRbrni8ARAA
+q3XRWRerbv/h4eFEPjZl+X9dElOrWdcPs/rUgzj2oFNtdcmPiDfDCcyGKeE7ojJE
+DxoJV5rcjRsKya5+fHStiqOyXU3tV/NgARoClFJ4C1jpkHxQgkfQSq9IabukNBic
+Yb7Kl9VDBpHeX+A/5ZhffYzcPPE4LB5MKv2bYQtJBMumKB+5vidBCc8a3CGbApS7
+ZKtnY0eaBl1+BldLbbLtzvZVPvEE0cL0yAaB7+SRU7uIg8BVSZnRIzl/i4OtJlpJ
+GsWCKCf/YwF/uCjtpiEJhl2s1pZSvt17HEkduyDDoNZj9ajZTxx51TD0ZOQ+KVCZ
+mjbZVZJ6Tpi2aA6Lam0C5eyWNQrIenJDZ6rIB9Q2MYtr83gdaGneQbJpzAEUbUv4
+h8JSWotbb5LBUGEG2dYP07KwLbX4x47XftMX2nBbLumRWmFsVapocrDQqWDsUat6
+hOHaAYTYkRIX3gXbxbBVwQBeEFDGBF0PyAjQVwwIOfPASZJFdYvwaxVR2mWcGa98
+deOhf+fwd2uj2uT33ZYyCiPXb/c5x9Vxed+DwGNLbV7mFQFfAAVXFLADc1VFlwCB
+IpKA60koPM6S/Zm57Zs76BmpAj/zhPCrL424fphtGvzC6VQh66A8nSSlGLxk+7JM
+WHsU0NscDZb4ucNTwTNZQ8PoVQ7KpdEf6Ivv2/ZpqssAEQEAAYkCHwQYAQIACQUC
+W654vAIbDAAKCRBpHGYc39hsAjoXEACfhdUtculy8j0Up6vfn8O0Tq9ewcUxmq7z
+iE/mkCQ6fhaVglIJKZfHDIa5UahpVdk/EAb7lh6V0kDcVGrfm/3NMKpg5xWQXHm/
+WyKK4kKtV8KM+CHCSe0AOGGZhPAPmt9jUW0S6CbwO55I8JLiIFvwZ5cewE4uMoL5
+WnYGLxMtElUTcUWCOrySY2PdyzgX9pOv4ZDpYo5MlG2xd4qTYR3swnAHizfuTnNr
+QpDwmnaviVvCZwfMSlE83E+8dDrFDyr74egLd68XRUKEJnrX1KNJmZW/HDud5d6z
+wDDSA8jtqKLs+EUZtJqP/zpM11sYsq7P6ABpgw+s32u5GopZ8MQNiFyKOVpInZqc
+M2T4lO69BEF1T3F8jFH7MIbyDpTxJgqCmlEhM111pxh/z7yxQm//5TyGKlXgXNjD
+SCN8DVYpieUMfbsmAZDbg2b2faTYBNoINOF5B2HsnFAO336+aSre5lrhHIv5gYuQ
+QtBiGNnTJIMHus+JGfdk5ZY7Akhgb0xf5ZVqNE1KPhoGhutCUPdJ8vxoB6LUiNa3
+oc7Cg+GcyrS3+2AjZCpwg/jpYbfdNOoX60Qf7FYY1Hi08g9YzivV5S0DaQWgHEGJ
+Zlf0WWO0W8ULaPmNd4XK/oHzxdyR7OPc0LM22VaLwQFwTS3yUMbgvxt/I51XYzil
+Fx6bhHulGA==
+=PgYX
+-----END PGP PUBLIC KEY BLOCK-----




[phoenix] branch master updated: PhoenixResultSet#next() closes the result set if scanner returns null

2019-04-19  tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new b4697af  PhoenixResultSet#next() closes the result set if scanner 
returns null
b4697af is described below

commit b4697afc3f23ca29ddd87c8708097cd68f91286d
Author: s.kadam 
AuthorDate: Thu Apr 18 14:05:21 2019 -0700

PhoenixResultSet#next() closes the result set if scanner returns null
---
 .../org/apache/phoenix/end2end/QueryLoggerIT.java  | 193 +++--
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  |   4 +-
 2 files changed, 102 insertions(+), 95 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
index 208eddd..8a08d37 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
@@ -103,44 +103,47 @@ public class QueryLoggerIT extends 
BaseUniqueNamesOwnClusterIT {
 Connection conn = DriverManager.getConnection(getUrl(),props);
 
assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(),LogLevel.DEBUG);
 String query = "SELECT * FROM " + tableName;
-ResultSet rs = conn.createStatement().executeQuery(query);
-StatementContext context = ((PhoenixResultSet)rs).getContext();
-String queryId = context.getQueryLogger().getQueryId();
-while (rs.next()) {
-rs.getString(1);
-rs.getString(2);
+StatementContext context;
+try (ResultSet rs = conn.createStatement().executeQuery(query)) {
+context = ((PhoenixResultSet) rs).getContext();
+while (rs.next()) {
+rs.getString(1);
+rs.getString(2);
+}
 }
-ResultSet explainRS = conn.createStatement().executeQuery("Explain " + 
query);
+String queryId = context.getQueryLogger().getQueryId();
 
 String logQuery = "SELECT * FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_LOG_TABLE + "\"";
 int delay = 5000;
 
 // sleep for sometime to let query log committed
 Thread.sleep(delay);
-rs = conn.createStatement().executeQuery(logQuery);
-boolean foundQueryLog = false;
+try (ResultSet explainRS = 
conn.createStatement().executeQuery("Explain " + query);
+ ResultSet rs = conn.createStatement().executeQuery(logQuery)) {
+boolean foundQueryLog = false;
 
-while (rs.next()) {
-if (rs.getString(QUERY_ID).equals(queryId)) {
-foundQueryLog = true;
-assertEquals(rs.getString(BIND_PARAMETERS), null);
-assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
-assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
-assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
-assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
-assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
-assertEquals(rs.getString(QUERY), query);
-assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
-assertEquals(rs.getString(TENANT_ID), null);
-assertTrue(rs.getString(SCAN_METRICS_JSON)==null);
-assertEquals(rs.getString(EXCEPTION_TRACE),null);
-}else{
-//confirm we are not logging system queries
-
assertFalse(rs.getString(QUERY).toString().contains(SYSTEM_CATALOG_SCHEMA));
+while (rs.next()) {
+if (rs.getString(QUERY_ID).equals(queryId)) {
+foundQueryLog = true;
+assertEquals(rs.getString(BIND_PARAMETERS), null);
+assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
+assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
+assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
+assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
+assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
+assertEquals(rs.getString(QUERY), query);
+assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
+assertEquals(rs.getString(TENANT_ID), null);
+assertTrue(rs.getString(SCAN_METRICS_JSON) == null);
+assertEquals(rs.getString(EXCEPTION_TRACE), null);
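
The change above converts manually tracked ResultSet variables to try-with-resources
blocks, so the result set is closed even when iteration ends early or an assertion
throws. A minimal sketch of the same idiom, assuming an illustrative JDBC URL and
table name (both are placeholders, not values from the patch):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class TryWithResourcesSketch {
        public static void main(String[] args) throws Exception {
            String url = "jdbc:phoenix:localhost"; // assumed local quorum
            try (Connection conn = DriverManager.getConnection(url);
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("SELECT * FROM MY_TABLE")) {
                while (rs.next()) {
                    rs.getString(1); // consume the row
                }
            } // conn, stmt and rs are closed here in reverse order, even on exception
        }
    }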

[phoenix] branch 4.14-HBase-1.4 updated: PhoenixResultSet#next() closes the result set if scanner returns null

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
 new 0d9c604  PhoenixResultSet#next() closes the result set if scanner 
returns null
0d9c604 is described below

commit 0d9c604c64805c7e4f229b09461d05f1856b1d7c
Author: s.kadam 
AuthorDate: Thu Apr 18 14:05:21 2019 -0700

PhoenixResultSet#next() closes the result set if scanner returns null
---
 .../org/apache/phoenix/end2end/QueryLoggerIT.java  | 193 +++--
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  |   4 +-
 2 files changed, 102 insertions(+), 95 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
index 618d7d9..08cb0c1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
@@ -101,44 +101,47 @@ public class QueryLoggerIT extends 
BaseUniqueNamesOwnClusterIT {
 Connection conn = DriverManager.getConnection(getUrl(),props);
 
assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(),LogLevel.DEBUG);
 String query = "SELECT * FROM " + tableName;
-ResultSet rs = conn.createStatement().executeQuery(query);
-StatementContext context = ((PhoenixResultSet)rs).getContext();
-String queryId = context.getQueryLogger().getQueryId();
-while (rs.next()) {
-rs.getString(1);
-rs.getString(2);
+StatementContext context;
+try (ResultSet rs = conn.createStatement().executeQuery(query)) {
+context = ((PhoenixResultSet) rs).getContext();
+while (rs.next()) {
+rs.getString(1);
+rs.getString(2);
+}
 }
-ResultSet explainRS = conn.createStatement().executeQuery("Explain " + 
query);
+String queryId = context.getQueryLogger().getQueryId();
 
 String logQuery = "SELECT * FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_LOG_TABLE + "\"";
 int delay = 5000;
 
 // sleep for sometime to let query log committed
 Thread.sleep(delay);
-rs = conn.createStatement().executeQuery(logQuery);
-boolean foundQueryLog = false;
+try (ResultSet explainRS = 
conn.createStatement().executeQuery("Explain " + query);
+ ResultSet rs = conn.createStatement().executeQuery(logQuery)) {
+boolean foundQueryLog = false;
 
-while (rs.next()) {
-if (rs.getString(QUERY_ID).equals(queryId)) {
-foundQueryLog = true;
-assertEquals(rs.getString(BIND_PARAMETERS), null);
-assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
-assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
-assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
-assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
-assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
-assertEquals(rs.getString(QUERY), query);
-assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
-assertEquals(rs.getString(TENANT_ID), null);
-assertTrue(rs.getString(SCAN_METRICS_JSON)==null);
-assertEquals(rs.getString(EXCEPTION_TRACE),null);
-}else{
-//confirm we are not logging system queries
-
assertFalse(rs.getString(QUERY).toString().contains(SYSTEM_CATALOG_SCHEMA));
+while (rs.next()) {
+if (rs.getString(QUERY_ID).equals(queryId)) {
+foundQueryLog = true;
+assertEquals(rs.getString(BIND_PARAMETERS), null);
+assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
+assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
+assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
+assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
+assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
+assertEquals(rs.getString(QUERY), query);
+assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
+assertEquals(rs.getString(TENANT_ID), null);
+assertTrue(rs.getString(SCAN_METRICS_JSON) == null);
+assertEquals(rs.getString(EXCEPTION_TRACE), null);

[phoenix] branch 4.x-HBase-1.2 updated: PhoenixResultSet#next() closes the result set if scanner returns null

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.x-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.2 by this push:
 new 5a57358  PhoenixResultSet#next() closes the result set if scanner 
returns null
5a57358 is described below

commit 5a57358117ee7733aeace41ff172153473884133
Author: s.kadam 
AuthorDate: Thu Apr 18 14:05:21 2019 -0700

PhoenixResultSet#next() closes the result set if scanner returns null
---
 .../org/apache/phoenix/end2end/QueryLoggerIT.java  | 193 +++--
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  |   4 +-
 2 files changed, 102 insertions(+), 95 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
index f3793f9..86d5c0f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
@@ -103,44 +103,47 @@ public class QueryLoggerIT extends 
BaseUniqueNamesOwnClusterIT {
 Connection conn = DriverManager.getConnection(getUrl(),props);
 
assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(),LogLevel.DEBUG);
 String query = "SELECT * FROM " + tableName;
-ResultSet rs = conn.createStatement().executeQuery(query);
-StatementContext context = ((PhoenixResultSet)rs).getContext();
-String queryId = context.getQueryLogger().getQueryId();
-while (rs.next()) {
-rs.getString(1);
-rs.getString(2);
+StatementContext context;
+try (ResultSet rs = conn.createStatement().executeQuery(query)) {
+context = ((PhoenixResultSet) rs).getContext();
+while (rs.next()) {
+rs.getString(1);
+rs.getString(2);
+}
 }
-ResultSet explainRS = conn.createStatement().executeQuery("Explain " + 
query);
+String queryId = context.getQueryLogger().getQueryId();
 
 String logQuery = "SELECT * FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_LOG_TABLE + "\"";
 int delay = 5000;
 
 // sleep for sometime to let query log committed
 Thread.sleep(delay);
-rs = conn.createStatement().executeQuery(logQuery);
-boolean foundQueryLog = false;
+try (ResultSet explainRS = 
conn.createStatement().executeQuery("Explain " + query);
+ ResultSet rs = conn.createStatement().executeQuery(logQuery)) {
+boolean foundQueryLog = false;
 
-while (rs.next()) {
-if (rs.getString(QUERY_ID).equals(queryId)) {
-foundQueryLog = true;
-assertEquals(rs.getString(BIND_PARAMETERS), null);
-assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
-assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
-assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
-assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
-assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
-assertEquals(rs.getString(QUERY), query);
-assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
-assertEquals(rs.getString(TENANT_ID), null);
-assertTrue(rs.getString(SCAN_METRICS_JSON)==null);
-assertEquals(rs.getString(EXCEPTION_TRACE),null);
-}else{
-//confirm we are not logging system queries
-
assertFalse(rs.getString(QUERY).toString().contains(SYSTEM_CATALOG_SCHEMA));
+while (rs.next()) {
+if (rs.getString(QUERY_ID).equals(queryId)) {
+foundQueryLog = true;
+assertEquals(rs.getString(BIND_PARAMETERS), null);
+assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
+assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
+assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
+assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
+assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
+assertEquals(rs.getString(QUERY), query);
+assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
+assertEquals(rs.getString(TENANT_ID), null);
+assertTrue(rs.getString(SCAN_METRICS_JSON) == null);
+assertEquals(rs.getString(EXCEPTION_TRACE), null);

[phoenix] branch 4.14-HBase-1.3 updated: PhoenixResultSet#next() closes the result set if scanner returns null

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
 new 563c8f9  PhoenixResultSet#next() closes the result set if scanner 
returns null
563c8f9 is described below

commit 563c8f9d4c7c477e82eff05f7e9e26aa560032d5
Author: s.kadam 
AuthorDate: Thu Apr 18 14:05:21 2019 -0700

PhoenixResultSet#next() closes the result set if scanner returns null
---
 .../org/apache/phoenix/end2end/QueryLoggerIT.java  | 193 +++--
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  |   4 +-
 2 files changed, 102 insertions(+), 95 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
index 618d7d9..08cb0c1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
@@ -101,44 +101,47 @@ public class QueryLoggerIT extends 
BaseUniqueNamesOwnClusterIT {
 Connection conn = DriverManager.getConnection(getUrl(),props);
 
assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(),LogLevel.DEBUG);
 String query = "SELECT * FROM " + tableName;
-ResultSet rs = conn.createStatement().executeQuery(query);
-StatementContext context = ((PhoenixResultSet)rs).getContext();
-String queryId = context.getQueryLogger().getQueryId();
-while (rs.next()) {
-rs.getString(1);
-rs.getString(2);
+StatementContext context;
+try (ResultSet rs = conn.createStatement().executeQuery(query)) {
+context = ((PhoenixResultSet) rs).getContext();
+while (rs.next()) {
+rs.getString(1);
+rs.getString(2);
+}
 }
-ResultSet explainRS = conn.createStatement().executeQuery("Explain " + 
query);
+String queryId = context.getQueryLogger().getQueryId();
 
 String logQuery = "SELECT * FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_LOG_TABLE + "\"";
 int delay = 5000;
 
 // sleep for sometime to let query log committed
 Thread.sleep(delay);
-rs = conn.createStatement().executeQuery(logQuery);
-boolean foundQueryLog = false;
+try (ResultSet explainRS = 
conn.createStatement().executeQuery("Explain " + query);
+ ResultSet rs = conn.createStatement().executeQuery(logQuery)) {
+boolean foundQueryLog = false;
 
-while (rs.next()) {
-if (rs.getString(QUERY_ID).equals(queryId)) {
-foundQueryLog = true;
-assertEquals(rs.getString(BIND_PARAMETERS), null);
-assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
-assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
-assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
-assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
-assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
-assertEquals(rs.getString(QUERY), query);
-assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
-assertEquals(rs.getString(TENANT_ID), null);
-assertTrue(rs.getString(SCAN_METRICS_JSON)==null);
-assertEquals(rs.getString(EXCEPTION_TRACE),null);
-}else{
-//confirm we are not logging system queries
-
assertFalse(rs.getString(QUERY).toString().contains(SYSTEM_CATALOG_SCHEMA));
+while (rs.next()) {
+if (rs.getString(QUERY_ID).equals(queryId)) {
+foundQueryLog = true;
+assertEquals(rs.getString(BIND_PARAMETERS), null);
+assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
+assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
+assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
+assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
+assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
+assertEquals(rs.getString(QUERY), query);
+assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
+assertEquals(rs.getString(TENANT_ID), null);
+assertTrue(rs.getString(SCAN_METRICS_JSON) == null);
+assertEquals(rs.getString(EXCEPTION_TRACE), null);

[phoenix] branch 4.14-HBase-1.2 updated: PhoenixResultSet#next() closes the result set if scanner returns null

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.2 by this push:
 new 6bb66eb  PhoenixResultSet#next() closes the result set if scanner 
returns null
6bb66eb is described below

commit 6bb66eb5565466a20ba09490cd082c144743c051
Author: s.kadam 
AuthorDate: Thu Apr 18 14:05:21 2019 -0700

PhoenixResultSet#next() closes the result set if scanner returns null
---
 .../org/apache/phoenix/end2end/QueryLoggerIT.java  | 193 +++--
 .../org/apache/phoenix/jdbc/PhoenixResultSet.java  |   4 +-
 2 files changed, 102 insertions(+), 95 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
index 618d7d9..08cb0c1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryLoggerIT.java
@@ -101,44 +101,47 @@ public class QueryLoggerIT extends 
BaseUniqueNamesOwnClusterIT {
 Connection conn = DriverManager.getConnection(getUrl(),props);
 
assertEquals(conn.unwrap(PhoenixConnection.class).getLogLevel(),LogLevel.DEBUG);
 String query = "SELECT * FROM " + tableName;
-ResultSet rs = conn.createStatement().executeQuery(query);
-StatementContext context = ((PhoenixResultSet)rs).getContext();
-String queryId = context.getQueryLogger().getQueryId();
-while (rs.next()) {
-rs.getString(1);
-rs.getString(2);
+StatementContext context;
+try (ResultSet rs = conn.createStatement().executeQuery(query)) {
+context = ((PhoenixResultSet) rs).getContext();
+while (rs.next()) {
+rs.getString(1);
+rs.getString(2);
+}
 }
-ResultSet explainRS = conn.createStatement().executeQuery("Explain " + 
query);
+String queryId = context.getQueryLogger().getQueryId();
 
 String logQuery = "SELECT * FROM " + SYSTEM_CATALOG_SCHEMA + ".\"" + 
SYSTEM_LOG_TABLE + "\"";
 int delay = 5000;
 
 // sleep for sometime to let query log committed
 Thread.sleep(delay);
-rs = conn.createStatement().executeQuery(logQuery);
-boolean foundQueryLog = false;
+try (ResultSet explainRS = 
conn.createStatement().executeQuery("Explain " + query);
+ ResultSet rs = conn.createStatement().executeQuery(logQuery)) {
+boolean foundQueryLog = false;
 
-while (rs.next()) {
-if (rs.getString(QUERY_ID).equals(queryId)) {
-foundQueryLog = true;
-assertEquals(rs.getString(BIND_PARAMETERS), null);
-assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
-assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
-assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
-assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
-assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
-assertEquals(rs.getString(QUERY), query);
-assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
-assertEquals(rs.getString(TENANT_ID), null);
-assertTrue(rs.getString(SCAN_METRICS_JSON)==null);
-assertEquals(rs.getString(EXCEPTION_TRACE),null);
-}else{
-//confirm we are not logging system queries
-
assertFalse(rs.getString(QUERY).toString().contains(SYSTEM_CATALOG_SCHEMA));
+while (rs.next()) {
+if (rs.getString(QUERY_ID).equals(queryId)) {
+foundQueryLog = true;
+assertEquals(rs.getString(BIND_PARAMETERS), null);
+assertEquals(rs.getString(USER), 
System.getProperty("user.name"));
+assertEquals(rs.getString(CLIENT_IP), 
InetAddress.getLocalHost().getHostAddress());
+assertEquals(rs.getString(EXPLAIN_PLAN), 
QueryUtil.getExplainPlan(explainRS));
+assertEquals(rs.getString(GLOBAL_SCAN_DETAILS), 
context.getScan().toJSON());
+assertEquals(rs.getLong(NO_OF_RESULTS_ITERATED), 10);
+assertEquals(rs.getString(QUERY), query);
+assertEquals(rs.getString(QUERY_STATUS), 
QueryStatus.COMPLETED.toString());
+assertEquals(rs.getString(TENANT_ID), null);
+assertTrue(rs.getString(SCAN_METRICS_JSON) == null);
+assertEquals(rs.getString(EXCEPTION_TRACE), null);

[phoenix] 22/34: PHOENIX-5025 Tool to clean up orphan views

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit e907249a80748f11c8558f8ce5c8d7b832791c17
Author: Kadir 
AuthorDate: Mon Nov 12 22:24:10 2018 -0800

PHOENIX-5025 Tool to clean up orphan views
---
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 472 +++
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 879 +
 2 files changed, 1351 insertions(+)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
new file mode 100644
index 0000000..f9a1785
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -0,0 +1,472 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME;
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.mapreduce.OrphanViewTool;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.AfterClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@RunWith(Parameterized.class)
+public class OrphanViewToolIT extends ParallelStatsDisabledIT {
+private static final Logger LOG = 
LoggerFactory.getLogger(OrphanViewToolIT.class);
+
+private final boolean isMultiTenant;
+private final boolean columnEncoded;
+
+private static final long fanout = 2;
+private static final long childCount = fanout;
+private static final long grandChildCount = fanout * fanout;
+private static final long grandGrandChildCount = fanout * fanout * fanout;
+
+private static final String filePath = "/tmp/";
+private static final String viewFileName = "/tmp/" + 
OrphanViewTool.fileName[OrphanViewTool.VIEW];
+private static final String physicalLinkFileName = "/tmp/" + 
OrphanViewTool.fileName[OrphanViewTool.PHYSICAL_TABLE_LINK];
+private static final String parentLinkFileName = "/tmp/" + 
OrphanViewTool.fileName[OrphanViewTool.PARENT_TABLE_LINK];
+private static final String childLinkFileName = "/tmp/" + 
OrphanViewTool.fileName[OrphanViewTool.CHILD_TABLE_LINK];
+
+protected static String SCHEMA1 = "SCHEMA1";
+protected static String SCHEMA2 = "SCHEMA2";
+protected static String SCHEMA3 = "SCHEMA3";
+protected static String SCHEMA4 = "SCHEMA4";
+
+private final String TENANT_SPECIFIC_URL = getUrl() + ';' + 
TENANT_ID_ATTRIB + "=tenant";
+
+private static final String createBaseTableFirstPartDDL = "CREATE TABLE IF 
NOT EXISTS %s";
+private static final String createBaseTableSecondPartDDL = "(%s PK2 
VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR " +
+" CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)) %s";
+private static final String deleteTableRows = "DELETE FROM " + 
SYSTEM_CATALOG_NAME +
+" 

[phoenix] 29/34: PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase config properties

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 019aa1eb2c0b724075c8b11b3078817d8f48e133
Author: Thomas D'Silva 
AuthorDate: Mon Feb 4 23:17:37 2019 -0800

PHOENIX-5124 PropertyPolicyProvider should not evaluate default hbase 
config properties
---
 .../it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java |  2 +-
 .../src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java  |  2 +-
 .../main/java/org/apache/phoenix/jdbc/PhoenixConnection.java   |  7 +--
 .../java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java|  7 ++-
 .../phoenix/mapreduce/util/PhoenixConfigurationUtil.java   |  6 ++
 .../src/main/java/org/apache/phoenix/query/QueryServices.java  |  2 ++
 .../java/org/apache/phoenix/query/QueryServicesOptions.java|  2 ++
 .../src/main/java/org/apache/phoenix/util/PropertiesUtil.java  |  5 -
 .../org/apache/phoenix/query/PropertyPolicyProviderTest.java   | 10 ++
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala   |  1 +
 10 files changed, 34 insertions(+), 10 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
index d601beb..771baed 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
@@ -70,7 +70,7 @@ public class AppendOnlySchemaIT extends 
ParallelStatsDisabledIT {
 Mockito.spy(driver.getConnectionQueryServices(getUrl(),
 PropertiesUtil.deepCopy(TEST_PROPERTIES)));
 Properties props = new Properties();
-props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+props.putAll(PhoenixEmbeddedDriver.DEFAULT_PROPS.asMap());
 
 try (Connection conn1 = connectionQueryServices.connect(getUrl(), 
props);
 Connection conn2 = sameClient ? conn1 : 
connectionQueryServices.connect(getUrl(), props)) {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
index e6a2f7d..dd4c549 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
@@ -144,7 +144,7 @@ public class UpdateCacheIT extends ParallelStatsDisabledIT {
// use a spyed ConnectionQueryServices so we can verify calls 
to getTable
ConnectionQueryServices connectionQueryServices = 
Mockito.spy(driver.getConnectionQueryServices(getUrl(), 
PropertiesUtil.deepCopy(TEST_PROPERTIES)));
Properties props = new Properties();
-   props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
+   props.putAll(PhoenixEmbeddedDriver.DEFAULT_PROPS.asMap());
Connection conn = connectionQueryServices.connect(getUrl(), 
props);
try {
conn.setAutoCommit(false);
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 312d17b..43b7ca9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -246,8 +246,11 @@ public class PhoenixConnection implements Connection, 
MetaDataMutated, SQLClosea
 this.isDescVarLengthRowKeyUpgrade = isDescVarLengthRowKeyUpgrade;
 
 // Filter user provided properties based on property policy, if
-// provided.
-PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
+// provided and QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED is true
+if 
(Boolean.valueOf(info.getProperty(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED,
+
String.valueOf(QueryServicesOptions.DEFAULT_PROPERTY_POLICY_PROVIDER_ENABLED
 {
+PropertyPolicyProvider.getPropertyPolicy().evaluate(info);
+}
 
 // Copy so client cannot change
 this.info = info == null ? new Properties() : PropertiesUtil
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 00dfb5a..2669360 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.jdbc;
 
 import static 
org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM;
 
-import java.io.File;
 import java.io.IOException;
 import java.sql.Connection;
 import java.sql.Driver;
@@ -37,7 +36,6
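
The new guard means property-policy evaluation can be switched off per connection.
A minimal client-side sketch, assuming an illustrative URL; the constant
QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED is the one referenced in the patch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;
    import org.apache.phoenix.query.QueryServices;

    public class PropertyPolicyToggle {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Skip the PropertyPolicyProvider check for this connection's properties.
            props.setProperty(QueryServices.PROPERTY_POLICY_PROVIDER_ENABLED, "false");
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
                // connection properties are no longer vetted by the provider
            }
        }
    }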

[phoenix] 28/34: PHOENIX-5188 - IndexedKeyValue should populate KeyValue fields

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 0eeb6c935da3c66fc8326d56128a1a4dbf0d4b0b
Author: Geoffrey Jacoby 
AuthorDate: Tue Mar 12 11:17:50 2019 -0700

PHOENIX-5188 - IndexedKeyValue should populate KeyValue fields
---
 .../phoenix/hbase/index/wal/IndexedKeyValue.java   | 25 
 .../regionserver/wal/IndexedKeyValueTest.java  | 67 ++
 2 files changed, 92 insertions(+)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
index b04cf0a..f01dc06 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
@@ -53,6 +53,7 @@ public class IndexedKeyValue extends KeyValue {
 public IndexedKeyValue() {}
 
 public IndexedKeyValue(byte[] bs, Mutation mutation) {
+super(mutation.getRow(), 0, mutation.getRow().length);
 this.indexTableName = new ImmutableBytesPtr(bs);
 this.mutation = mutation;
 this.hashCode = calcHashCode(indexTableName, mutation);
@@ -117,6 +118,24 @@ public class IndexedKeyValue extends KeyValue {
 }
 
 @Override
+public int getRowOffset() {
+return this.offset;
+}
+
+@Override
+public short getRowLength() {
+return (short) this.length;
+}
+
+@Override
+public int getKeyLength(){
+//normally the key is row key + other key fields such as timestamp,
+// but those aren't defined here because a Mutation can contain 
multiple,
+// so we just return the length of the row key
+return this.length;
+}
+
+@Override
 public String toString() {
 return "IndexWrite:\n\ttable: " + indexTableName + "\n\tmutation:" + 
mutation;
 }
@@ -179,6 +198,12 @@ public class IndexedKeyValue extends KeyValue {
 MutationProto mProto = MutationProto.parseFrom(mutationData);
 this.mutation = 
org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto);
 this.hashCode = calcHashCode(indexTableName, mutation);
+if (mutation != null){
+bytes = mutation.getRow();
+offset = 0;
+length = bytes.length;
+}
+
 }
 
 public boolean getBatchFinished() {
diff --git 
a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java
 
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java
new file mode 100644
index 000..7f34fcd
--- /dev/null
+++ 
b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
+import org.apache.phoenix.hbase.index.wal.KeyValueCodec;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+
+
+public class IndexedKeyValueTest {
+
+@Test
+public void testIndexedKeyValuePopulatesKVFields() throws Exception {
+byte[] row = Bytes.toBytes("foo");
+byte[] tableNameBytes = Bytes.toBytes("MyTableName");
+Mutation mutation = new Put(row);
+IndexedKeyValue indexedKeyValue = new IndexedKeyValue(tableNameBytes, 
mutation);
+testIndexedKeyValueHelper(indexedKeyValue, row, tableNameBytes, 
mutation);
+
+//now serialize the IndexedKeyValue and make sure the deserialized 
copy also
+//has all the right fields
+ByteArrayOutputStream baos = new ByteArrayOutputStream(
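
The invariant the fix (and the test above, truncated here) establishes: an
IndexedKeyValue built from a Mutation must expose that mutation's row key through
the inherited KeyValue/Cell accessors. A compressed restatement as a fragment,
using only names from the diff plus HBase's CellUtil:

    byte[] row = Bytes.toBytes("foo");
    Mutation mutation = new Put(row);
    IndexedKeyValue ikv = new IndexedKeyValue(Bytes.toBytes("MyTableName"), mutation);
    // Before the fix the row fields were left unset; after it they mirror the mutation.
    assert ikv.getRowLength() == row.length;
    assert Bytes.equals(row, CellUtil.cloneRow(ikv));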

[phoenix] 17/34: PHOENIX-5111: Null Pointer exception fix in index tool due to outputpath being null when direct option is supplied

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 6c9aee159e3858e103a40a678fc4868905a09dc4
Author: Gokcen Iskender 
AuthorDate: Mon Jan 28 13:16:44 2019 -0800

PHOENIX-5111: Null Pointer exception fix in index tool due to outputpath 
being null when direct option is supplied

Signed-off-by: Geoffrey Jacoby 
---
 .../java/org/apache/phoenix/mapreduce/index/IndexTool.java | 14 --
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index 15d41ea..78e946d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -249,7 +249,7 @@ public class IndexTool extends Configured implements Tool {
 if (isPartialBuild) {
 return configureJobForPartialBuild(schemaName, dataTable);
 } else {
-return configureJobForAysncIndex(schemaName, indexTable, 
dataTable, useDirectApi, useSnapshot);
+return configureJobForAsyncIndex(schemaName, indexTable, 
dataTable, useDirectApi, useSnapshot);
 }
 }
 
@@ -362,7 +362,7 @@ public class IndexTool extends Configured implements Tool {
 
 }
 
-private Job configureJobForAysncIndex(String schemaName, String 
indexTable, String dataTable, boolean useDirectApi, boolean useSnapshot)
+private Job configureJobForAsyncIndex(String schemaName, String 
indexTable, String dataTable, boolean useDirectApi, boolean useSnapshot)
 throws Exception {
 final String qDataTable = 
SchemaUtil.getQualifiedTableName(schemaName, dataTable);
 final String qIndexTable;
@@ -408,14 +408,16 @@ public class IndexTool extends Configured implements Tool 
{
 final List<ColumnInfo> columnMetadataList =
 PhoenixRuntime.generateColumnInfo(connection, qIndexTable, 
indexColumns);
 ColumnInfoToStringEncoderDecoder.encode(configuration, 
columnMetadataList);
-fs = outputPath.getFileSystem(configuration);
-fs.delete(outputPath, true);
- 
+
 final String jobName = String.format(INDEX_JOB_NAME_TEMPLATE, 
schemaName, dataTable, indexTable);
 final Job job = Job.getInstance(configuration, jobName);
 job.setJarByClass(IndexTool.class);
 job.setMapOutputKeyClass(ImmutableBytesWritable.class);
-FileOutputFormat.setOutputPath(job, outputPath);
+if (outputPath != null) {
+fs = outputPath.getFileSystem(configuration);
+fs.delete(outputPath, true);
+FileOutputFormat.setOutputPath(job, outputPath);
+}
 
 if (!useSnapshot) {
 PhoenixMapReduceUtil.setInput(job, 
PhoenixIndexDBWritable.class, qDataTable,



[phoenix] 11/34: PHOENIX-4864 Fix NullPointerException while Logging some DDL Statements

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 7094a0c255d0b411f7e668a2f68e76807368ecf4
Author: Ashutosh Parekh 
AuthorDate: Thu Aug 23 12:11:23 2018 -0700

PHOENIX-4864 Fix NullPointerException while Logging some DDL Statements
---
 .../monitoring/PhoenixLoggingMetricsIT.java| 42 +-
 .../jdbc/LoggingPhoenixPreparedStatement.java  |  5 ++-
 .../phoenix/jdbc/LoggingPhoenixStatement.java  |  5 ++-
 3 files changed, 49 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 7e56902..5d5524c 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.monitoring;
 import com.google.common.collect.Maps;
 import org.apache.phoenix.jdbc.LoggingPhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixMetricsLog;
+import org.apache.phoenix.jdbc.LoggingPhoenixResultSet;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -69,6 +70,35 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 }
 
 @Test
+public void testResultSetTypeForQueries() throws Exception {
+String tableName3 = generateUniqueName();
+
+String create = "CREATE TABLE " + tableName3 + " (K INTEGER PRIMARY 
KEY)";
+assertTrue(executeAndGetResultSet(create) == null);
+
+String upsert = "UPSERT INTO " + tableName3 + " VALUES (42)";
+assertTrue(executeAndGetResultSet(upsert) == null);
+
+String select = "SELECT * FROM " + tableName3;
+assertTrue(executeAndGetResultSet(select) instanceof 
LoggingPhoenixResultSet);
+
+String createView = "CREATE VIEW TEST_VIEW (K INTEGER) AS SELECT * 
FROM " + tableName3;
+assertTrue(executeAndGetResultSet(createView) == null);
+
+String createIndex = "CREATE INDEX TEST_INDEX ON " + tableName3 + " 
(K)";
+assertTrue(executeAndGetResultSet(createIndex) == null);
+
+String dropIndex = "DROP INDEX TEST_INDEX ON " + tableName3;
+assertTrue(executeAndGetResultSet(dropIndex) == null);
+
+String dropView = "DROP VIEW TEST_VIEW";
+assertTrue(executeAndGetResultSet(dropView) == null);
+
+String dropTable = "DROP TABLE " + tableName3;
+assertTrue(executeAndGetResultSet(dropTable) == null);
+}
+
+@Test
 public void testPhoenixMetricsLoggedOnCommit() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
@@ -134,12 +164,22 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 mutationReadMetricsMap.size() == 0);
 }
 
+private ResultSet executeAndGetResultSet(String query) throws Exception {
+Statement stmt = loggedConn.createStatement();
+stmt.execute(query);
+return stmt.getResultSet();
+}
+
 private void verifyQueryLevelMetricsLogging(String query) throws 
SQLException {
 Statement stmt = loggedConn.createStatement();
 ResultSet rs = stmt.executeQuery(query);
+assertTrue(rs instanceof LoggingPhoenixResultSet);
+int rowsRetrievedCounter = 0;
 while (rs.next()) {
+rowsRetrievedCounter++;
 }
 rs.close();
+assertTrue(rowsRetrievedCounter == NUM_ROWS);
 assertTrue("Read metrics for not found for " + tableName1,
 requestReadMetricsMap.get(tableName1).size() > 0);
 assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
@@ -192,4 +232,4 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 }
 });
 }
-}
+}
\ No newline at end of file
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixPreparedStatement.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixPreparedStatement.java
index 952e3fd..dbeea0d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixPreparedStatement.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixPreparedStatement.java
@@ -45,7 +45,10 @@ public class LoggingPhoenixPreparedStatement extends 
DelegatePreparedStatement {
 
 @Override
 public ResultSet getResultSet() throws SQLException {
-return new LoggingPhoenixResultSet(super.getResultSet(), 
phoenixMetricsLog, sql);
+// Re-use the cached ResultSet value since call to getResultSet() is 
not idempotent
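
Because getResultSet() may legitimately be called more than once, the patch caches
the wrapper on first use instead of allocating a fresh LoggingPhoenixResultSet per
call. A sketch of that idiom as it would sit inside the delegate statement; the
field name is illustrative and the surrounding class is elided:

    private ResultSet cachedResultSet; // illustrative field name

    @Override
    public ResultSet getResultSet() throws SQLException {
        ResultSet rs = super.getResultSet();
        if (rs == null) {
            return null; // DDL and UPSERT produce no result set, as the IT asserts
        }
        if (cachedResultSet == null) {
            cachedResultSet = new LoggingPhoenixResultSet(rs, phoenixMetricsLog, sql);
        }
        return cachedResultSet;
    }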

[phoenix] 05/34: PHOENIX-4755 Provide an option to plugin custom avatica server config in PQS

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 0a8d170dc5aba133eefd38e28e98b6438db63552
Author: Karan Mehta 
AuthorDate: Mon Aug 20 16:52:22 2018 -0700

PHOENIX-4755 Provide an option to plugin custom avatica server config in PQS
---
 .../org/apache/phoenix/query/QueryServices.java|   3 +-
 .../apache/phoenix/query/QueryServicesOptions.java |   1 +
 .../phoenix/end2end/ServerCustomizersIT.java   |   4 +-
 .../server/AvaticaServerConfigurationFactory.java  |  20 +++
 .../phoenix/queryserver/server/QueryServer.java| 167 ++---
 .../server/ServerCustomizersFactory.java   |   7 +-
 .../CustomAvaticaServerConfigurationTest.java  |  20 +++
 .../server/QueryServerConfigurationTest.java   |  26 +++-
 .../queryserver/server/ServerCustomizersTest.java  |  13 +-
 9 files changed, 194 insertions(+), 67 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index c7548df..9072d26 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -251,8 +251,9 @@ public interface QueryServices extends SQLCloseable {
 public static final String QUERY_SERVER_KERBEROS_ALLOWED_REALMS = 
"phoenix.queryserver.kerberos.allowed.realms";
 public static final String QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB = 
"phoenix.queryserver.spnego.auth.disabled";
 public static final String QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB = 
"phoenix.queryserver.withRemoteUserExtractor";
-public static final String QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = 
"phoenix.queryserver.remoteUserExtractor.param";
 public static final String QUERY_SERVER_CUSTOMIZERS_ENABLED = 
"phoenix.queryserver.customizers.enabled";
+public static final String QUERY_SERVER_CUSTOM_AUTH_ENABLED = 
"phoenix.queryserver.custom.auth.enabled";
+public static final String QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = 
"phoenix.queryserver.remoteUserExtractor.param";
 public static final String QUERY_SERVER_DISABLE_KERBEROS_LOGIN = 
"phoenix.queryserver.disable.kerberos.login";
 
 // metadata configs
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 7933ba0..02a3d4b 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -299,6 +299,7 @@ public class QueryServicesOptions {
 public static final int DEFAULT_QUERY_SERVER_UGI_CACHE_CONCURRENCY = 10;
 public static final boolean DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED = 
false;
 public static final boolean DEFAULT_QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR 
= false;
+public static final boolean DEFAULT_QUERY_SERVER_CUSTOM_AUTH_ENABLED = 
false;
 public static final String DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM 
= "doAs";
 public static final boolean DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN = 
false;
 public static final boolean DEFAULT_QUERY_SERVER_CUSTOMIZERS_ENABLED = 
false;
diff --git 
a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
 
b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
index d990adb..db08908 100644
--- 
a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
+++ 
b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
 import org.apache.calcite.avatica.server.ServerCustomizer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.query.QueryServices;
@@ -66,7 +67,8 @@ public class ServerCustomizersIT extends 
BaseHBaseManagedTimeIT {
 InstanceResolver.clearSingletons();
 InstanceResolver.getSingleton(ServerCustomizersFactory.class, new 
ServerCustomizersFactory() {
 @Override
-public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf) {
+public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf,
+  AvaticaServerConfiguration avaticaServerConfiguration) {
 return Collections.<ServerCustomizer<Server>>singletonList(new TestServerCustomizer());
 }
 });
diff --git 
a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerCo
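
Deployers plug in customizers the same way the integration test above does: register
a ServerCustomizersFactory through InstanceResolver and implement the widened
two-argument method. A sketch with imports as in that test; myCustomizer is an
illustrative stand-in for a real Jetty Server customizer:

    InstanceResolver.clearSingletons();
    InstanceResolver.getSingleton(ServerCustomizersFactory.class,
            new ServerCustomizersFactory() {
        @Override
        public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf,
                AvaticaServerConfiguration avaticaServerConfiguration) {
            // return whatever Server customizers the deployment needs
            return Collections.<ServerCustomizer<Server>>singletonList(myCustomizer);
        }
    });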

[phoenix] 07/34: PHOENIX-4834 PhoenixMetricsLog interface methods should not depend on specific logger

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit dd5725ca0a873e42d6568490bf518db378cdf20c
Author: Karan Mehta 
AuthorDate: Tue Aug 7 15:47:33 2018 -0700

PHOENIX-4834 PhoenixMetricsLog interface methods should not depend on 
specific logger
---
 .../org/apache/phoenix/monitoring/PhoenixMetricsIT.java |  9 -
 .../org/apache/phoenix/jdbc/LoggingPhoenixConnection.java   | 11 ++-
 .../org/apache/phoenix/jdbc/LoggingPhoenixResultSet.java|  7 ++-
 .../java/org/apache/phoenix/jdbc/PhoenixMetricsLog.java | 13 ++---
 4 files changed, 18 insertions(+), 22 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index f13391f..4c5c592 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -76,7 +76,6 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
-import org.slf4j.Logger;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -1046,25 +1045,25 @@ public class PhoenixMetricsIT extends 
BaseUniqueNamesOwnClusterIT {
 LoggingPhoenixConnection protectedConn =
 new LoggingPhoenixConnection(conn, new PhoenixMetricsLog() {
 @Override
-public void logOverAllReadRequestMetrics(Logger logger,
+public void logOverAllReadRequestMetrics(
 Map overAllQueryMetrics) {
 overAllQueryMetricsMap.putAll(overAllQueryMetrics);
 }
 
 @Override
-public void logRequestReadMetrics(Logger logger,
+public void logRequestReadMetrics(
 Map> 
requestReadMetrics) {
 requestReadMetricsMap.putAll(requestReadMetrics);
 }
 
 @Override
-public void logWriteMetricsfoForMutations(Logger logger,
+public void logWriteMetricsfoForMutations(
 Map> 
mutationWriteMetrics) {
 mutationWriteMetricsMap.putAll(mutationWriteMetrics);
 }
 
 @Override
-public void 
logReadMetricInfoForMutationsSinceLastReset(Logger logger,
+public void logReadMetricInfoForMutationsSinceLastReset(
 Map> 
mutationReadMetrics) {
 mutationReadMetricsMap.putAll(mutationReadMetrics);
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
index e1b5dee..d98da83 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
@@ -23,12 +23,9 @@ import java.sql.SQLException;
 import java.sql.Statement;
 
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class LoggingPhoenixConnection extends DelegateConnection {
 
-private static final Logger logger = 
LoggerFactory.getLogger(LoggingPhoenixResultSet.class);
 private PhoenixMetricsLog phoenixMetricsLog;
 
 public LoggingPhoenixConnection(Connection conn,
@@ -37,6 +34,10 @@ public class LoggingPhoenixConnection extends 
DelegateConnection {
 this.phoenixMetricsLog = phoenixMetricsLog;
 }
 
+public PhoenixMetricsLog getPhoenixMetricsLog() {
+return phoenixMetricsLog;
+}
+
 @Override
 public Statement createStatement() throws SQLException {
 return new LoggingPhoenixStatement(super.createStatement(), 
phoenixMetricsLog);
@@ -101,8 +102,8 @@ public class LoggingPhoenixConnection extends 
DelegateConnection {
 @Override
 public void commit() throws SQLException {
 super.commit();
-phoenixMetricsLog.logWriteMetricsfoForMutations(logger, 
PhoenixRuntime.getWriteMetricInfoForMutationsSinceLastReset(conn));
-phoenixMetricsLog.logReadMetricInfoForMutationsSinceLastReset(logger, 
PhoenixRuntime.getReadMetricInfoForMutationsSinceLastReset(conn));
+
phoenixMetricsLog.logWriteMetricsfoForMutations(PhoenixRuntime.getWriteMetricInfoForMutationsSinceLastReset(conn));
+
phoenixMetricsLog.logReadMetricInfoForMutationsSinceLastReset(PhoenixRuntime.getReadMetricInfoForMutationsSinceLastReset(conn));
 PhoenixRuntime.resetMetrics(conn);
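
Caller-side usage is unchanged by the interface cleanup: wrap a connection, run
statements, and commit; the PhoenixMetricsLog implementation now chooses its own
logger. A sketch, assuming metricsLog is an implementation like the anonymous one
in the test diff and MY_TABLE is illustrative:

    LoggingPhoenixConnection logged = new LoggingPhoenixConnection(conn, metricsLog);
    try (Statement stmt = logged.createStatement()) { // a LoggingPhoenixStatement
        stmt.executeUpdate("UPSERT INTO MY_TABLE VALUES (42)");
        // commit() fires logWriteMetricsfoForMutations and
        // logReadMetricInfoForMutationsSinceLastReset on metricsLog
        logged.commit();
    }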

[phoenix] 34/34: PHOENIX-5184: HBase and Phoenix connection leaks in Indexing code path, OrphanViewTool and PhoenixConfigurationUtil

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 3d543f17e0bd7d7e2b172af594ad82748f7e8143
Author: Chinmay Kulkarni 
AuthorDate: Thu Mar 14 23:16:14 2019 -0700

PHOENIX-5184: HBase and Phoenix connection leaks in Indexing code path, 
OrphanViewTool and PhoenixConfigurationUtil
---
 .../UngroupedAggregateRegionObserver.java  |  6 ++-
 .../hbase/index/write/RecoveryIndexWriter.java | 10 ++--
 .../phoenix/mapreduce/AbstractBulkLoadTool.java| 15 ++
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 53 ++---
 .../phoenix/mapreduce/PhoenixRecordWriter.java | 18 +--
 .../mapreduce/index/DirectHTableWriter.java| 14 +-
 .../mapreduce/index/IndexScrutinyMapper.java   | 24 --
 .../apache/phoenix/mapreduce/index/IndexTool.java  | 55 --
 .../index/PhoenixIndexImportDirectMapper.java  | 26 +-
 .../mapreduce/index/PhoenixIndexImportMapper.java  | 16 ---
 .../index/PhoenixIndexPartialBuildMapper.java  | 25 ++
 .../mapreduce/util/PhoenixConfigurationUtil.java   | 45 +-
 .../apache/phoenix/parse/DropTableStatement.java   |  4 +-
 13 files changed, 190 insertions(+), 121 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 2eb15a1..f0ce5b2 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -817,7 +817,11 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 try {
 if (targetHTable != null) {
-targetHTable.close();
+try {
+targetHTable.close();
+} catch (IOException e) {
+logger.error("Closing table: " + targetHTable + " 
failed: ", e);
+}
 }
 } finally {
 try {
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index 35f0a6d..fb9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -26,8 +26,6 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -55,15 +53,13 @@ public class RecoveryIndexWriter extends IndexWriter {
  * Directly specify the {@link IndexCommitter} and {@link 
IndexFailurePolicy}. Both are expected to be fully setup
  * before calling.
  * 
- * @param committer
  * @param policy
  * @param env
+ * @param name
  * @throws IOException
- * @throws ZooKeeperConnectionException
- * @throws MasterNotRunningException
  */
 public RecoveryIndexWriter(IndexFailurePolicy policy, 
RegionCoprocessorEnvironment env, String name)
-throws MasterNotRunningException, ZooKeeperConnectionException, 
IOException {
+throws IOException {
 super(new TrackingParallelWriterIndexCommitter(), policy, env, name);
 this.admin = new HBaseAdmin(env.getConfiguration());
 }
@@ -125,7 +121,7 @@ public class RecoveryIndexWriter extends IndexWriter {
 try {
 admin.close();
 } catch (IOException e) {
-// closing silently
+LOG.error("Closing the admin failed: ", e);
 }
 }
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index f717647..4561152 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -17,7 +17,6 @@
  */
 package org.apache.phoenix.mapreduce;
 
-import java.io.IOException;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
@@ -37,20 +36,17 @@ import org.apache.commons.cli.ParseException;
 import org.apache.co
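
The recurring pattern in this patch: every close() failure is caught and logged
rather than silently swallowed, so leaks become visible. A generic sketch of the
idiom; the helper class is illustrative, not something the patch adds:

    import java.io.Closeable;
    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class CloseUtil {
        private static final Logger LOG = LoggerFactory.getLogger(CloseUtil.class);

        public static void closeAndLog(Closeable resource, String what) {
            if (resource == null) {
                return; // nothing was opened, nothing to leak
            }
            try {
                resource.close();
            } catch (IOException e) {
                LOG.error("Closing " + what + " failed: ", e); // visible, not swallowed
            }
        }
    }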

[phoenix] 15/34: PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE timestamp check for all index

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 16ec10ca766d59e8587dcaddfb438d1759e7a3e4
Author: Monani Mihir 
AuthorDate: Fri Dec 14 18:15:55 2018 +0530

PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE timestamp check 
for all index

Signed-off-by: Geoffrey Jacoby 
---
 .../coprocessor/MetaDataRegionObserver.java| 35 +-
 1 file changed, 21 insertions(+), 14 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 4968525..4045d47 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -512,20 +512,27 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 					String indexTableFullName = SchemaUtil.getTableName(
 							indexPTable.getSchemaName().getString(),
 							indexPTable.getTableName().getString());
-					if (scanEndTime == latestUpperBoundTimestamp) {
-						IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L, latestUpperBoundTimestamp);
-						batchExecutedPerTableMap.remove(dataPTable.getName());
-						LOG.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
-					} else {
-						// Increment timestamp so that client sees updated disable timestamp
-						IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(), scanEndTime * signOfDisableTimeStamp, latestUpperBoundTimestamp);
-						Long noOfBatches = batchExecutedPerTableMap.get(dataPTable.getName());
-						if (noOfBatches == null) {
-							noOfBatches = 0l;
-						}
-						batchExecutedPerTableMap.put(dataPTable.getName(), ++noOfBatches);
-						LOG.info("During Round-robin build: Successfully updated index disabled timestamp  for "
-								+ indexTableFullName + " to " + scanEndTime);
+					try {
+						if (scanEndTime == latestUpperBoundTimestamp) {
+							IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L,
+									latestUpperBoundTimestamp);
+							batchExecutedPerTableMap.remove(dataPTable.getName());
+							LOG.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
+						} else {
+							// Increment timestamp so that client sees updated disable timestamp
+							IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(),
+									scanEndTime * signOfDisableTimeStamp, latestUpperBoundTimestamp);
+							Long noOfBatches = batchExecutedPerTableMap.get(dataPTable.getName());
+							if (noOfBatches == null) {
+								noOfBatches = 0l;
+							}
+
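
The hunk above (cut off by the archive) wraps the per-index state transition in a try block so that a failure while updating one index's state does not abort the whole rebuild pass. A condensed sketch of that control flow, with simplified names standing in for the Phoenix types:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class RebuildBookkeepingSketch {
        private static final Log LOG = LogFactory.getLog(RebuildBookkeepingSketch.class);
        private final Map<String, Long> batchExecutedPerTableMap = new HashMap<>();

        // One iteration of the rebuild loop's bookkeeping: the try/catch
        // mirrors the patch above, so a failure for one index is logged and
        // the loop can continue with the next index instead of unwinding.
        void recordRebuildProgress(String dataTable, String indexTable, boolean caughtUp) {
            try {
                if (caughtUp) {
                    batchExecutedPerTableMap.remove(dataTable);
                    LOG.info("Making Index:" + indexTable + " active after rebuilding");
                } else {
                    Long noOfBatches = batchExecutedPerTableMap.get(dataTable);
                    if (noOfBatches == null) {
                        noOfBatches = 0L;
                    }
                    batchExecutedPerTableMap.put(dataTable, ++noOfBatches);
                }
            } catch (RuntimeException e) {
                LOG.error("Unable to update rebuild bookkeeping for " + indexTable, e);
            }
        }
    }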

[phoenix] 33/34: PHOENIX-5018 Index mutations created by UPSERT SELECT will have wrong timestamps

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 474150eace4545257e70b6e4d17f2a610bfa9840
Author: Kadir 
AuthorDate: Tue Jan 29 17:14:02 2019 -0800

PHOENIX-5018 Index mutations created by UPSERT SELECT will have wrong 
timestamps

Signed-off-by: Geoffrey Jacoby 
---
 .../phoenix/end2end/IndexBuildTimestampIT.java | 246 
 .../org/apache/phoenix/end2end/IndexToolIT.java|  37 ++
 .../phoenix/end2end/TableDDLPermissionsIT.java |   8 -
 .../org/apache/phoenix/rpc/PhoenixServerRpcIT.java |   6 -
 .../phoenix/compile/ServerBuildIndexCompiler.java  | 138 +++
 .../org/apache/phoenix/index/IndexMaintainer.java  | 433 ++---
 .../phoenix/mapreduce/PhoenixInputFormat.java  |   3 +-
 .../phoenix/mapreduce/PhoenixRecordReader.java |   4 +-
 .../PhoenixServerBuildIndexInputFormat.java| 111 ++
 .../apache/phoenix/mapreduce/index/IndexTool.java  | 240 +++-
 .../index/PhoenixServerBuildIndexMapper.java   |  75 
 .../mapreduce/util/PhoenixConfigurationUtil.java   |  25 ++
 .../mapreduce/util/PhoenixMapReduceUtil.java   |  27 ++
 .../org/apache/phoenix/schema/MetaDataClient.java  |  16 +-
 14 files changed, 1031 insertions(+), 338 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexBuildTimestampIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexBuildTimestampIT.java
new file mode 100644
index 000..7efba07
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexBuildTimestampIT.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixResultSet;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.EnvironmentEdge;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.QueryUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import com.google.common.collect.Lists;
+
+@RunWith(Parameterized.class)
+public class IndexBuildTimestampIT extends BaseUniqueNamesOwnClusterIT {
+private final boolean localIndex;
+private final boolean async;
+private final boolean view;
+private final String tableDDLOptions;
+
+public IndexBuildTimestampIT(boolean mutable, boolean localIndex,
+boolean async, boolean view) {
+this.localIndex = localIndex;
+this.async = async;
+this.view = view;
+StringBuilder optionBuilder = new StringBuilder();
+if (!mutable) {
+optionBuilder.append(" IMMUTABLE_ROWS=true ");
+}
+optionBuilder.append(" SPLIT ON(1,2)");
+this.tableDDLOptions = optionBuilder.toString();
+}
+
+@BeforeClass
+public static void setup() throws Exception {
+IndexToolIT.setup();
+}
+
+@Parameters(
+name = "mutable={0},localIndex={1},async={2},view={3}")
+public static Collection data() {
+List list = Lists.newArrayListWithExpectedSize(8);
+boolean[] Booleans = new boolean[]{false, true};
+for (boolean mutable : Booleans) {
+for (boolean localIndex : Booleans
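
The data() method is truncated above; it nests one loop per boolean flag so the suite runs once for every mutable/localIndex/async/view combination. A self-contained sketch of the same JUnit 4 pattern, reduced to two flags for brevity:

    import java.util.Collection;
    import java.util.List;

    import com.google.common.collect.Lists;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Parameterized.class)
    public class BooleanMatrixTest {
        private final boolean mutable;
        private final boolean localIndex;

        public BooleanMatrixTest(boolean mutable, boolean localIndex) {
            this.mutable = mutable;
            this.localIndex = localIndex;
        }

        // Expands to 4 runs: {false,false}, {false,true}, {true,false}, {true,true}.
        @Parameters(name = "mutable={0},localIndex={1}")
        public static Collection<Object[]> data() {
            List<Object[]> list = Lists.newArrayListWithExpectedSize(4);
            for (boolean mutable : new boolean[] { false, true }) {
                for (boolean localIndex : new boolean[] { false, true }) {
                    list.add(new Object[] { mutable, localIndex });
                }
            }
            return list;
        }

        @Test
        public void runsOncePerCombination() {
            // A real test would create a table with these options; this is a stub.
        }
    }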

[phoenix] 26/34: PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit aa7b9702189fe9f05fa2d052160006f0781c1e3b
Author: s.kadam 
AuthorDate: Wed Dec 5 16:11:07 2018 -0800

PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.
---
 phoenix-core/pom.xml   |   7 +
 .../org/apache/phoenix/tool/CanaryTestResult.java  |  86 
 .../org/apache/phoenix/tool/PhoenixCanaryTool.java | 477 +
 .../resources/phoenix-canary-file-sink.properties  |  17 +
 .../apache/phoenix/tool/PhoenixCanaryToolTest.java | 140 ++
 5 files changed, 727 insertions(+)

diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index e539163..fe85290 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -26,6 +26,7 @@
 
   
 ${project.basedir}/..
+0.8.1
   
 
   
@@ -238,6 +239,12 @@
   sqlline
 
 
+  net.sourceforge.argparse4j
+  argparse4j
+  ${argparse4j.version}
+
+
+
   com.google.guava
   guava
 
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java 
b/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java
new file mode 100644
index 000..b72439c
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.tool;
+
+public class CanaryTestResult {
+
+private boolean isSuccessful;
+private long startTime;
+private long executionTime;
+private String message;
+private String testName;
+private String timestamp;
+private Object miscellaneous;
+
+public Object getMiscellaneous() {
+return miscellaneous;
+}
+
+public void setMiscellaneous(Object miscellaneous) {
+this.miscellaneous = miscellaneous;
+}
+
+public long getStartTime() {
+return startTime;
+}
+
+public void setStartTime(long startTime) {
+this.startTime = startTime;
+}
+
+public String getTimestamp() {
+return timestamp;
+}
+
+public void setTimestamp(String timestamp) {
+this.timestamp = timestamp;
+}
+
+public boolean isSuccessful() {
+return isSuccessful;
+}
+
+public void setSuccessful(boolean successful) {
+isSuccessful = successful;
+}
+
+public long getExecutionTime() {
+return executionTime;
+}
+
+public void setExecutionTime(long executionTime) {
+this.executionTime = executionTime;
+}
+
+public String getMessage() {
+return message;
+}
+
+public void setMessage(String message) {
+this.message = message;
+}
+
+public String getTestName() {
+return testName;
+}
+
+public void setTestName(String testName) {
+this.testName = testName;
+}
+
+}
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java 
b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
new file mode 100644
index 000..405f54f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
@@ -0,0 +1,477 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.tool;
+
+import co
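
PhoenixCanaryTool itself is cut off above, but its shape is a sequence of small synthetic SQL probes, each recorded in a CanaryTestResult. A minimal sketch of one such probe using the result bean added by this commit; the JDBC URL and query are placeholders for illustration, not the tool's configuration:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class MiniCanary {
        // A stripped-down probe in the spirit of PhoenixCanaryTool: run one
        // round-trip query and report success plus elapsed time.
        public static CanaryTestResult probe(String jdbcUrl) {
            CanaryTestResult result = new CanaryTestResult();
            result.setTestName("select");
            long start = System.currentTimeMillis();
            result.setStartTime(start);
            try (Connection conn = DriverManager.getConnection(jdbcUrl);
                    Statement stmt = conn.createStatement();
                    ResultSet rs = stmt.executeQuery("SELECT 1")) {
                result.setSuccessful(rs.next());
                result.setMessage("ok");
            } catch (Exception e) {
                result.setSuccessful(false);
                result.setMessage(e.getMessage());
            }
            result.setExecutionTime(System.currentTimeMillis() - start);
            return result;
        }
    }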

[phoenix] 24/34: PHOENIX-5247 DROP TABLE and DROP VIEW commands fail to drop second or higher level child views

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 7d2aac130180119fa183c717940a367e7706672f
Author: Kadir 
AuthorDate: Wed Apr 17 17:27:16 2019 -0700

PHOENIX-5247 DROP TABLE and DROP VIEW commands fail to drop second or 
higher level child views
---
 .../src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java | 4 ++--
 phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java   | 2 +-
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index d33d538..6d2dfb9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -422,7 +422,7 @@ public class BasePermissionsIT extends BaseTest {
 @Override
 public Object run() throws Exception {
 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
-assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
+assertFalse(stmt.execute(String.format("DROP TABLE IF EXISTS %s CASCADE", tableName)));
 }
 return null;
 }
@@ -647,7 +647,7 @@ public class BasePermissionsIT extends BaseTest {
 @Override
 public Object run() throws Exception {
 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
-assertFalse(stmt.execute("DROP VIEW " + viewName));
+assertFalse(stmt.execute(String.format("DROP VIEW %s CASCADE", viewName)));
 }
 return null;
 }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 34292ba..a6e066b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -428,7 +428,7 @@ public class ViewIT extends BaseViewIT {
 ddl = "CREATE LOCAL INDEX idx on " + fullViewName1 + "(v2)";
 conn.createStatement().execute(ddl);
 String fullViewName2 = SchemaUtil.getTableName(viewSchemaName, "V_" + 
generateUniqueName());
-ddl = "CREATE VIEW " + fullViewName2 + "(v2 VARCHAR) AS SELECT * FROM 
" + fullTableName + " WHERE k > 10";
+ddl = "CREATE VIEW " + fullViewName2 + "(v3 VARCHAR) AS SELECT * FROM 
" + fullViewName1 + " WHERE k > 10";
 conn.createStatement().execute(ddl);
 
 validateCannotDropTableWithChildViewsWithoutCascade(conn, 
fullTableName);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index a87325e..f810dd7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2169,7 +2169,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 EnvironmentEdgeManager.currentTimeMillis(), null);
 }
 
-if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) {
+if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM || tableType == PTableType.VIEW) {
 // Handle any child views that exist
 TableViewFinder tableViewFinderResult = findChildViews(region, tenantId, table, clientVersion, !isCascade);
 if (tableViewFinderResult.hasViews()) {
@@ -2191,7 +2191,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 acquireLock(region, viewKey, locks);
 MetaDataMutationResult result = doDropTable(viewKey, viewTenantId, viewSchemaName,
 viewName, null, PTableType.VIEW, rowsToDelete, invalidateList, locks,
-tableNamesToDelete, sharedTablesToDelete, false, clientVersion);
+tableNamesToDelete, sharedTablesToDelete, true, clientVersion);
 if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { return result; }
 }
 }
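
The two changes above work together: the child-view walk now also runs when the object being dropped is itself a VIEW, and the recursive doDropTable call passes the cascade flag as true, so grandchild views are reached. A schematic sketch of that recursion; the Catalog interface here is an illustration, not the MetaDataEndpointImpl API:

    import java.util.List;

    public class CascadeDropSketch {
        interface Catalog {
            List<String> childViews(String name);
            void dropSingle(String name);
        }

        // Drop the children before the parent, recursing so that second and
        // higher level child views are removed as well -- the behavior this
        // commit restores for DROP TABLE/DROP VIEW ... CASCADE.
        static void dropWithCascade(Catalog catalog, String name) {
            for (String child : catalog.childViews(name)) {
                dropWithCascade(catalog, child);
            }
            catalog.dropSingle(name);
        }
    }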



[phoenix] 30/34: PHOENIX-4822 Ensure the provided timezone is used client-side (Jaanai Zhang)

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 1777c54eee6bbb21144d91f3d936c968e3af614e
Author: Josh Elser 
AuthorDate: Tue Jul 31 15:53:11 2018 -0400

PHOENIX-4822 Ensure the provided timezone is used client-side (Jaanai Zhang)
---
 .../org/apache/phoenix/end2end/DateTimeIT.java | 77 ++
 .../apache/phoenix/compile/StatementContext.java   | 11 ++--
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  8 ++-
 .../java/org/apache/phoenix/util/DateUtil.java | 22 ---
 4 files changed, 101 insertions(+), 17 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
index c976114..cc7c7a7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -54,12 +54,19 @@ import java.text.Format;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
 import java.util.Properties;
+import java.util.TimeZone;
 
+import org.apache.commons.lang.time.FastDateFormat;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PTime;
 import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.DateUtil;
@@ -1880,4 +1887,74 @@ public class DateTimeIT extends ParallelStatsDisabledIT {
 conn.close();
 }
 }
+
+@Test
+public void testDateFormatTimeZone()throws Exception {
+String[] timeZoneIDs = {DateUtil.DEFAULT_TIME_ZONE_ID, "Asia/Yerevan", 
"Australia/Adelaide", "Asia/Tokyo"};
+for (String timeZoneID : timeZoneIDs) {
+testDateFormatTimeZone(timeZoneID);
+}
+}
+
+public void testDateFormatTimeZone(String timeZoneId) throws Exception {
+Properties props = new Properties();
+props.setProperty("phoenix.query.dateFormatTimeZone", timeZoneId);
+Connection conn1 = DriverManager.getConnection(getUrl(), props);
+
+String tableName = generateUniqueName();
+String ddl = "CREATE TABLE IF NOT EXISTS " + tableName +
+" (k1 INTEGER PRIMARY KEY," +
+" v_date DATE," +
+" v_time TIME," +
+" v_timestamp TIMESTAMP)";
+try {
+conn1.createStatement().execute(ddl);
+
+PhoenixConnection pConn = conn1.unwrap(PhoenixConnection.class);
+verifyTimeZoneIDWithConn(pConn, PDate.INSTANCE, timeZoneId);
+verifyTimeZoneIDWithConn(pConn, PTime.INSTANCE, timeZoneId);
+verifyTimeZoneIDWithConn(pConn, PTimestamp.INSTANCE, timeZoneId);
+
+Calendar cal = 
Calendar.getInstance(TimeZone.getTimeZone(timeZoneId));
+cal.setTime(date);
+String dateStr = 
DateUtil.getDateFormatter(DateUtil.DEFAULT_MS_DATE_FORMAT).format(date);
+
+String dml = "UPSERT INTO " + tableName + " VALUES (" +
+"1," +
+"'" + dateStr + "'," +
+"'" + dateStr + "'," +
+"'" + dateStr + "'" +
+")";
+conn1.createStatement().execute(dml);
+conn1.commit();
+
+PhoenixStatement stmt = 
conn1.createStatement().unwrap(PhoenixStatement.class);
+ResultSet rs = stmt.executeQuery("SELECT v_date, v_time, 
v_timestamp FROM " + tableName);
+
+assertTrue(rs.next());
+assertEquals(rs.getDate(1).toString(), new 
Date(cal.getTimeInMillis()).toString());
+assertEquals(rs.getTime(2).toString(), new 
Time(cal.getTimeInMillis()).toString());
+assertEquals(rs.getTimestamp(3).getTime(), cal.getTimeInMillis());
+assertFalse(rs.next());
+
+StatementContext stmtContext = stmt.getQueryPlan().getContext();
+verifyTimeZoneIDWithFormatter(stmtContext.getDateFormatter(), 
timeZoneId);
+verifyTimeZoneIDWithFormatter(stmtContext.getTimeFormatter(), 
timeZoneId);
+verifyTimeZoneIDWithFormatter(stmtContext.getTimestampFormatter(), 
timeZoneId);
+
+stmt.close();
+} finally {
+  
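
A minimal client-side usage sketch of the setting this test exercises; the property key is taken verbatim from the test above, while the JDBC URL and query are placeholders:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;
    import java.util.Properties;

    public class DateFormatTimeZoneExample {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Tells the Phoenix client which time zone to apply when
            // formatting DATE/TIME/TIMESTAMP values on the client side.
            props.setProperty("phoenix.query.dateFormatTimeZone", "Asia/Tokyo");
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
                    Statement stmt = conn.createStatement();
                    ResultSet rs = stmt.executeQuery("SELECT CURRENT_DATE() FROM SYSTEM.CATALOG LIMIT 1")) {
                while (rs.next()) {
                    System.out.println(rs.getDate(1));
                }
            }
        }
    }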

[phoenix] 08/34: PHOENIX-4835 LoggingPhoenixConnection should log metrics upon connection close

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 4241f5711bf55e4fcf192414e7f4f7796122ba29
Author: Karan Mehta 
AuthorDate: Thu Aug 16 15:08:12 2018 -0700

PHOENIX-4835 LoggingPhoenixConnection should log metrics upon connection 
close
---
 .../phoenix/monitoring/BasePhoenixMetricsIT.java   | 128 +++
 .../monitoring/PhoenixLoggingMetricsIT.java| 181 +
 .../phoenix/monitoring/PhoenixMetricsIT.java   | 170 ++-
 .../phoenix/jdbc/LoggingPhoenixConnection.java |  12 +-
 4 files changed, 332 insertions(+), 159 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/BasePhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/BasePhoenixMetricsIT.java
new file mode 100644
index 000..5c016f6
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/BasePhoenixMetricsIT.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.jdbc.PhoenixDriver;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES;
+import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTED_COUNTER;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class BasePhoenixMetricsIT extends BaseUniqueNamesOwnClusterIT {
+
+static final int MAX_RETRIES = 5;
+
+static final List mutationMetricsToSkip =
+Lists.newArrayList(MetricType.MUTATION_COMMIT_TIME);
+static final List readMetricsToSkip =
+Lists.newArrayList(MetricType.TASK_QUEUE_WAIT_TIME,
+MetricType.TASK_EXECUTION_TIME, MetricType.TASK_END_TO_END_TIME,
+MetricType.COUNT_MILLS_BETWEEN_NEXTS);
+static final String CUSTOM_URL_STRING = "SESSION";
+static final AtomicInteger numConnections = new AtomicInteger(0);
+
+@BeforeClass
+public static void doSetup() throws Exception {
+Map props = Maps.newHashMapWithExpectedSize(1);
+// Phoenix Global client metrics are enabled by default
+// Enable request metric collection at the driver level
+props.put(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, 
String.valueOf(true));
+// disable renewing leases as this will force spooling to happen.
+props.put(QueryServices.RENEW_LEASE_ENABLED, String.valueOf(false));
+setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+// need the non-test driver for some tests that check number of 
hconnections, etc.
+DriverManager.registerDriver(PhoenixDriver.INSTANCE);
+
+}
+
+Connection insertRowsInTable(String tableName, long numRows) throws 
SQLException {
+String dml = "UPSERT INTO " + tableName + " VALUES (?, ?)";
+Connection conn = DriverManager.getConnection(getUrl());
+PreparedStatement stmt = conn.prepareStatement(dml);
+for (int i = 1; i <= numRows; i++) {
+stmt.setString(1, "key" + i);
+stmt.setString(2, "value" + i);
+stmt.executeUpdate();
+}
+conn.commit();
+return conn;
+}
+
+void assertReadMetricsForMutatingSql(String tableName, long 
tableSaltBuckets,
+ Map> readMetrics) {
+assertTrue("No read metrics present when there should have been!", 
readMetrics.size() > 0);
+int numTables = 0;
+for (Map.Entry> entry : 
readMetrics.entrySet()) 
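
For reference, a minimal sketch of how a client opts in to the request-level metrics this harness relies on; the QueryServices constant is the one used in doSetup() above, and the JDBC URL is a placeholder:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    import org.apache.phoenix.query.QueryServices;

    public class MetricsEnabledConnection {
        // Request-level metric collection is opt-in: it must be enabled
        // through connection properties before the connection is created.
        public static Connection open() throws Exception {
            Properties props = new Properties();
            props.setProperty(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, "true");
            return DriverManager.getConnection("jdbc:phoenix:localhost", props);
        }
    }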

[phoenix] 09/34: PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface for query level metrics logging

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 58e21cc14ee5e63dffdb3a5d4a5ed528c2a63602
Author: Karan Mehta 
AuthorDate: Fri Aug 17 13:02:08 2018 -0700

PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface for query 
level metrics logging
---
 .../monitoring/PhoenixLoggingMetricsIT.java| 15 +++---
 .../phoenix/jdbc/LoggingPhoenixConnection.java | 16 +++
 .../jdbc/LoggingPhoenixPreparedStatement.java  | 13 +++-
 .../phoenix/jdbc/LoggingPhoenixResultSet.java  | 10 +
 .../phoenix/jdbc/LoggingPhoenixStatement.java  | 24 +-
 .../org/apache/phoenix/jdbc/PhoenixMetricsLog.java |  6 +++---
 6 files changed, 56 insertions(+), 28 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 02640e7..97b2c5d 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -43,6 +43,7 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 private String tableName1;
 private String tableName2;
 private LoggingPhoenixConnection loggedConn;
+private String loggedSql;
 
 @Before
 public void beforeTest() throws Exception {
@@ -75,7 +76,10 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 rs.close();
 assertTrue("Read metrics for not found for " + tableName1,
 requestReadMetricsMap.get(tableName1).size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+
 assertTrue("Overall read metrics for not found ", 
overAllQueryMetricsMap.size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -120,7 +124,10 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 rs.close();
 assertTrue("Read metrics for not found for " + tableName1,
 requestReadMetricsMap.get(tableName1).size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+
 assertTrue("Overall read metrics for not found ", 
overAllQueryMetricsMap.size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -155,18 +162,20 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 return new LoggingPhoenixConnection(conn, new PhoenixMetricsLog() {
 @Override
 public void logOverAllReadRequestMetrics(
-Map overAllQueryMetrics) {
+Map overAllQueryMetrics, String sql) {
 overAllQueryMetricsMap.putAll(overAllQueryMetrics);
+loggedSql = sql;
 }
 
 @Override
 public void logRequestReadMetrics(
-Map> requestReadMetrics) {
+Map> requestReadMetrics, 
String sql) {
 requestReadMetricsMap.putAll(requestReadMetrics);
+loggedSql = sql;
 }
 
 @Override
-public void logWriteMetricsfoForMutations(
+public void logWriteMetricsfoForMutationsSinceLastReset(
 Map> mutationWriteMetrics) {
 mutationWriteMetricsMap.putAll(mutationWriteMetrics);
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
index 9a2e00f..37917e2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
@@ -61,7 +61,7 @@ public class LoggingPhoenixConnection extends 
DelegateConnection {
 @Override
 public PreparedStatement prepareStatement(String sql) throws SQLException {
 return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql),
-phoenixMetricsLog);
+phoenixMetricsLog, sql);
 }
 
 @Override
@@ -69,40 +69,40 @@ public class LoggingPhoenixConnection extends DelegateConnection {
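
The diff is truncated above, but the change threads the SQL text through each wrapper so metrics can be reported next to the statement that produced them. A schematic of the decorator shape; MetricsSink is a simplified stand-in for PhoenixMetricsLog, not the real interface:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class LoggingConnectionSketch {
        // Simplified sink: receives the SQL string so metrics can be
        // attributed to the statement that produced them.
        interface MetricsSink {
            void logReadMetrics(String sql);
        }

        private final Connection delegate;
        private final MetricsSink sink;

        LoggingConnectionSketch(Connection delegate, MetricsSink sink) {
            this.delegate = delegate;
            this.sink = sink;
        }

        // The decorator captures the SQL at prepare time and hands it to the
        // sink; the real wrappers defer the call until results are consumed.
        PreparedStatement prepareStatement(String sql) throws SQLException {
            PreparedStatement stmt = delegate.prepareStatement(sql);
            sink.logReadMetrics(sql);
            return stmt;
        }
    }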

[phoenix] 12/34: PHOENIX-4870 LoggingPhoenixConnection should log metrics when AutoCommit is set to True.

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 1961a97ce1e2a6dff48437872ca738e83968c498
Author: s.kadam 
AuthorDate: Wed Sep 5 17:00:03 2018 -0700

PHOENIX-4870 LoggingPhoenixConnection should log metrics when AutoCommit is 
set to True.
---
 .../monitoring/PhoenixLoggingMetricsIT.java| 61 --
 .../phoenix/jdbc/LoggingPhoenixConnection.java | 37 +++--
 .../jdbc/LoggingPhoenixPreparedStatement.java  | 25 +++--
 .../phoenix/jdbc/LoggingPhoenixStatement.java  | 28 --
 4 files changed, 125 insertions(+), 26 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 5d5524c..483d341 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -102,7 +102,8 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 public void testPhoenixMetricsLoggedOnCommit() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
-verifyQueryLevelMetricsLogging(query);
+ResultSet rs = upsertRows(query);
+verifyQueryLevelMetricsLogging(query, rs);
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -140,7 +141,9 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 public void testPhoenixMetricsLoggedOnClose() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
-verifyQueryLevelMetricsLogging(query);
+
+ResultSet rs = upsertRows(query);
+verifyQueryLevelMetricsLogging(query, rs);
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -164,13 +167,61 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 mutationReadMetricsMap.size() == 0);
 }
 
+/**
+ * This test is added to verify if metrics are being logged in case
+ * auto commit is set to true.
+ */
+@Test
+public void testPhoenixMetricsLoggedOnAutoCommitTrue() throws Exception {
+loggedConn.setAutoCommit(true);
+
+String query = "SELECT * FROM " + tableName1;
+ResultSet rs = upsertRows(query);
+verifyQueryLevelMetricsLogging(query, rs);
+
+// run UPSERT SELECT to verify mutation metrics are logged
+String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
+loggedConn.createStatement().executeUpdate(upsertSelect);
+
+assertTrue("Mutation write metrics are not logged for " + tableName2,
+mutationWriteMetricsMap.get(tableName2).size()  > 0);
+assertTrue("Mutation read metrics are not found for " + tableName1,
+mutationReadMetricsMap.get(tableName1).size() > 0);
+
+clearAllTestMetricMaps();
+
+loggedConn.createStatement().execute(query);
+assertTrue("Read metrics found for " + tableName1,
+mutationReadMetricsMap.size() == 0);
+loggedConn.createStatement().execute(upsertSelect);
+
+assertTrue("Mutation write metrics are not logged for " + tableName2
++ " in 
createStatement",mutationWriteMetricsMap.get(tableName2).size()  > 0);
+assertTrue("Mutation read metrics are not found for " + tableName1
++ " in 
createStatement",mutationReadMetricsMap.get(tableName1).size() > 0);
+
+clearAllTestMetricMaps();
+
+loggedConn.prepareStatement(query).executeQuery();
+assertTrue("Read metrics found for " + tableName1,
+mutationReadMetricsMap.size() == 0);
+
+loggedConn.prepareStatement(upsertSelect).executeUpdate();
+assertTrue("Mutation write metrics are not logged for " + tableName2
++ " in 
prepareStatement",mutationWriteMetricsMap.get(tableName2).size()  > 0);
+assertTrue("Mutation read metrics are not found for " + tableName1
++ " in 
prepareStatement",mutationReadMetricsMap.get(tableName1).size() > 0);
+
+
+}
+
 private ResultSet executeAndGetResultSet(String query) throws Exception {
 Statement stmt = loggedConn.createStatement();
 

[phoenix] 13/34: PHOENIX-4989 Include disruptor jar in shaded dependency

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 9e51efa927de7d352c5789f5e3f282c6f4fab9ce
Author: Aman Poonia 
AuthorDate: Tue Oct 30 13:57:52 2018 -0700

PHOENIX-4989 Include disruptor jar in shaded dependency
---
 phoenix-server/pom.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 139647d..39bd8b9 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -139,6 +139,7 @@
   com.ibm.icu:icu4j
   com.ibm.icu:icu4j-charset
   com.ibm.icu:icu4j-localespi
+  com.lmax:disruptor
 
   
 org.apache.phoenix:phoenix-server



[phoenix] 03/34: PHOENIX-5005 Server-side delete / upsert-select potentially blocked after a split

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit c5a1f043a401500d54440c9e427e22bb586ea310
Author: Vincent Poon 
AuthorDate: Thu Nov 8 15:38:20 2018 -0800

PHOENIX-5005 Server-side delete / upsert-select potentially blocked after a 
split
---
 .../UngroupedAggregateRegionObserver.java  | 43 +-
 1 file changed, 26 insertions(+), 17 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index c325d70..b5af271 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -259,7 +259,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
   // flush happen which decrease the memstore size and then writes allowed on the region.
   for (int i = 0; blockingMemstoreSize > 0 && region.getMemstoreSize() > blockingMemstoreSize && i < 30; i++) {
   try {
-  checkForRegionClosing();
+  checkForRegionClosingOrSplitting();
   Thread.sleep(100);
   } catch (InterruptedException e) {
   Thread.currentThread().interrupt();
@@ -308,7 +308,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
  * a high chance that flush might not proceed and memstore won't be freed up.
  * @throws IOException
  */
-private void checkForRegionClosing() throws IOException {
+private void checkForRegionClosingOrSplitting() throws IOException {
 synchronized (lock) {
 if(isRegionClosingOrSplitting) {
 lock.notifyAll();
@@ -1316,13 +1316,31 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 @Override
 public void preSplit(ObserverContext c, byte[] splitRow)
 throws IOException {
-// Don't allow splitting if operations need read and write to same region are going on in the
-// the coprocessors to avoid dead lock scenario. See PHOENIX-3111.
+waitForScansToFinish(c);
+}
+
+// Don't allow splitting/closing if operations need read and write to same region are going on in the
+// the coprocessors to avoid dead lock scenario. See PHOENIX-3111.
+private void waitForScansToFinish(ObserverContext c) throws IOException {
+int maxWaitTime = c.getEnvironment().getConfiguration().getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+long start = EnvironmentEdgeManager.currentTimeMillis();
 synchronized (lock) {
 isRegionClosingOrSplitting = true;
-if (scansReferenceCount > 0) {
-throw new IOException("Operations like local index building/delete/upsert select"
-+ " might be going on so not allowing to split.");
+while (scansReferenceCount > 0) {
+try {
+lock.wait(1000);
+if (EnvironmentEdgeManager.currentTimeMillis() - start >= maxWaitTime) {
+isRegionClosingOrSplitting = false; // must reset in case split is not retried
+throw new IOException(String.format(
+"Operations like local index building/delete/upsert select"
++ " might be going on so not allowing to split/close. scansReferenceCount=%s region=%s",
+scansReferenceCount,
+c.getEnvironment().getRegionInfo().getRegionNameAsString()));
+}
+} catch (InterruptedException e) {
+Thread.currentThread().interrupt();
+}
 }
 }
 }
@@ -1343,16 +1361,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 @Override
 public void preClose(ObserverContext c, boolean abortRequested)
 throws IOException {
-synchronized (lock) {
-isRegionClosingOrSplitting = true;
-while (scansReferenceCount > 0) {
-try {
-lock.wait(1000);
-} catch (InterruptedException e) {
-Thread.currentThread().interrupt();
-}
-}
-}
+waitForScansToFinish(c);
 }
 
 @Override
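
The structure of waitForScansToFinish distilled: a guarded wait that drains a reference count in one-second slices and gives up at a deadline, rather than blocking a close forever or failing a split immediately. A self-contained sketch with simplified names (the deadline plays the role of the HBase client operation timeout in the patch):

    public class BoundedWaitSketch {
        private final Object lock = new Object();
        private int activeScans;

        void beginScan() {
            synchronized (lock) { activeScans++; }
        }

        void endScan() {
            synchronized (lock) {
                activeScans--;
                lock.notifyAll(); // wake a waiting split/close
            }
        }

        // Wait in bounded slices until the scan count drains; fail once the
        // deadline has passed instead of waiting indefinitely.
        void awaitNoActiveScans(long maxWaitMillis) throws InterruptedException {
            long deadline = System.currentTimeMillis() + maxWaitMillis;
            synchronized (lock) {
                while (activeScans > 0) {
                    long remaining = deadline - System.currentTimeMillis();
                    if (remaining <= 0) {
                        throw new IllegalStateException("scans still active after " + maxWaitMillis + " ms");
                    }
                    lock.wait(Math.min(remaining, 1000L));
                }
            }
        }
    }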



[phoenix] 27/34: PHOENIX-5172: Harden the PQS canary synth test tool with retry mechanism and more logging

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 30754af22e8a1d35f5e5ae88dbb1ff393bac69d1
Author: Swaroopa Kadam 
AuthorDate: Tue Mar 19 13:39:45 2019 -0700

PHOENIX-5172: Harden the PQS canary synth test tool with retry mechanism 
and more logging
---
 .../org/apache/phoenix/tool/PhoenixCanaryTool.java | 212 ++--
 .../tool/ParameterizedPhoenixCanaryToolIT.java | 280 +
 .../apache/phoenix/tool/PhoenixCanaryToolTest.java |  53 +---
 .../resources/phoenix-canary-file-sink.properties  |  17 ++
 4 files changed, 378 insertions(+), 184 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java 
b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
index 405f54f..865d210 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
@@ -28,18 +28,20 @@ import 
net.sourceforge.argparse4j.inf.ArgumentParserException;
 import net.sourceforge.argparse4j.inf.Namespace;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import java.io.File;
 import java.io.InputStream;
 import java.sql.Connection;
-import java.sql.DatabaseMetaData;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
-import java.sql.Statement;
+import java.sql.SQLException;
+import java.sql.Timestamp;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -49,16 +51,23 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
 /**
- * A Canary Tool to perform synthetic tests for Query Server
+ * A Canary Tool to perform synthetic tests for Phoenix
+ * It assumes that TEST.PQSTEST or the schema.table passed in the argument
+ * is already present as following command
+ * CREATE TABLE IF NOT EXISTS TEST.PQSTEST (mykey INTEGER NOT NULL
+ * PRIMARY KEY, mycolumn VARCHAR, insert_date TIMESTAMP);
+ *
  */
 public class PhoenixCanaryTool extends Configured implements Tool {
 
 private static String TEST_SCHEMA_NAME = "TEST";
 private static String TEST_TABLE_NAME = "PQSTEST";
 private static String FQ_TABLE_NAME = "TEST.PQSTEST";
-private boolean USE_NAMESPACE = true;
-
+private static Timestamp timestamp;
+private static final int MAX_CONNECTION_ATTEMPTS = 5;
+private final int FIRST_TIME_RETRY_TIMEOUT = 5000;
 private Sink sink = new StdOutSink();
+public static final String propFileName = "phoenix-canary-file-sink.properties";
 
 /**
  * Base class for a Canary Test
@@ -97,84 +106,38 @@ public class PhoenixCanaryTool extends Configured implements Tool {
 }
 }
 
-/**
- * Test which prepares environment before other tests run
- */
-static class PrepareTest extends CanaryTest {
-void onExecute() throws Exception {
-result.setTestName("prepare");
-Statement statement = connection.createStatement();
-DatabaseMetaData dbm = connection.getMetaData();
-ResultSet tables = dbm.getTables(null, TEST_SCHEMA_NAME, TEST_TABLE_NAME, null);
-if (tables.next()) {
-// Drop test Table if exists
-statement.executeUpdate("DROP TABLE IF EXISTS " + FQ_TABLE_NAME);
-}
-
-// Drop test schema if exists
-if (TEST_SCHEMA_NAME != null) {
-statement = connection.createStatement();
-statement.executeUpdate("DROP SCHEMA IF EXISTS " + TEST_SCHEMA_NAME);
-}
-}
-}
-
-/**
- * Create Schema Test
- */
-static class CreateSchemaTest extends CanaryTest {
-void onExecute() throws Exception {
-result.setTestName("createSchema");
-Statement statement = connection.createStatement();
-statement.executeUpdate("CREATE SCHEMA IF NOT EXISTS " + TEST_SCHEMA_NAME);
-}
-}
-
-/**
- * Create Table Test
- */
-static class CreateTableTest extends CanaryTest {
-void onExecute() throws Exception {
-result.setTestName("createTable");
-Statement statement = connection.createStatement();
-// Create Table
-statement.executeUpdate("CREATE TABLE IF NOT EXISTS" + FQ_TABLE_NAME + " (mykey " + "INTEGER "
-+ "NOT " + "NULL PRIMARY KEY, " + "mycolumn VARCHAR)");
-
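
The retry mechanism named in the subject boils down to a bounded loop with backoff around connection establishment. The patch uses HBase's RetryCounter together with the MAX_CONNECTION_ATTEMPTS and FIRST_TIME_RETRY_TIMEOUT constants above; this sketch is a generic equivalent with illustrative names:

    public final class BoundedRetry {
        interface Action<T> {
            T run() throws Exception;
        }

        // Retry a transient operation (e.g. getting a connection) up to
        // maxAttempts times, doubling the sleep between attempts.
        static <T> T withRetries(Action<T> action, int maxAttempts, long firstTimeoutMillis)
                throws Exception {
            if (maxAttempts <= 0) {
                throw new IllegalArgumentException("maxAttempts must be > 0");
            }
            Exception last = null;
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                try {
                    return action.run();
                } catch (Exception e) {
                    last = e;
                    Thread.sleep(firstTimeoutMillis << attempt); // exponential backoff
                }
            }
            throw last;
        }
    }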

[phoenix] 18/34: PHOENIX-5094 increment pending disable count for index when rebuild starts

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit b853be9fc4818c4d47594bb7e999c688bf059f12
Author: Kiran Kumar Maturi 
AuthorDate: Fri Feb 1 19:37:11 2019 +0530

PHOENIX-5094 increment pending disable count for index when rebuild starts
---
 .../index/IndexRebuildIncrementDisableCountIT.java | 237 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   2 +-
 .../coprocessor/MetaDataRegionObserver.java|  23 +-
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  21 +-
 .../java/org/apache/phoenix/util/IndexUtil.java|  30 +++
 5 files changed, 292 insertions(+), 21 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
new file mode 100644
index 000..694f359
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
+import 
org.apache.phoenix.coprocessor.MetaDataRegionObserver.BuildIndexScheduleTask;
+import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClusterIT {
+private static final Log LOG = LogFactory.getLog(IndexRebuildIncrementDisableCountIT.class);
+private static long pendingDisableCount = 0;
+private static String ORG_PREFIX = "ORG";
+private static Result pendingDisableCountResult = null;
+private static String indexState = null;
+private static final Random RAND = new Random(5);
+private static final int WAIT_AFTER_DISABLED = 5000;
+private static final long REBUILD_PERIOD = 5;
+private static final long REBUILD_INTERVAL = 2000;
+private static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
+private static String schemaName;
+private static String tableName;
+private static String fullTableName;
+private static String indexName;
+private static String fullIndexName;
+private static Connection conn;
+private static PhoenixConnection phoenixConn;
+
+@BeforeClass
+public static void doSetup() throws Exception {
+Map serverProps = Maps.newHashMapWithExpectedSize(10);
+serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
+Boolean.TRUE.toString());
+
serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB,
+Long.toString(REBUILD_INTERVA
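
The diff is cut off above; the gist of PHOENIX-5094 is that a rebuild round bumps a pending-disable counter up front and releases it when done, so concurrent failure handling can tell a rebuild is in flight instead of flipping the index state prematurely. A schematic sketch of that idea only: in Phoenix the counter lives in SYSTEM.CATALOG and is updated through IndexUtil, not an in-memory atomic:

    import java.util.concurrent.atomic.AtomicLong;

    public class PendingDisableGuard {
        private final AtomicLong pendingDisableCount = new AtomicLong();

        // Increment before the round starts, decrement when it ends, even on
        // failure, so the count always reflects in-flight rebuild work.
        void runRebuildRound(Runnable round) {
            pendingDisableCount.incrementAndGet();
            try {
                round.run();
            } finally {
                pendingDisableCount.decrementAndGet();
            }
        }

        boolean rebuildInFlight() {
            return pendingDisableCount.get() > 0;
        }
    }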

[phoenix] 10/34: PHOENIX-4854 Make LoggingPhoenixResultSet idempotent when logging metrics

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit c6df225c633ad03da13b9ba559b502c216029fc1
Author: Karan Mehta 
AuthorDate: Mon Aug 20 10:12:37 2018 -0700

PHOENIX-4854 Make LoggingPhoenixResultSet idempotent when logging metrics
---
 .../monitoring/PhoenixLoggingMetricsIT.java| 49 --
 .../phoenix/jdbc/LoggingPhoenixResultSet.java  | 15 +--
 2 files changed, 38 insertions(+), 26 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 97b2c5d..7e56902 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -26,6 +26,7 @@ import org.junit.Test;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Map;
 
@@ -44,6 +45,8 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 private String tableName2;
 private LoggingPhoenixConnection loggedConn;
 private String loggedSql;
+private int logOverAllReadRequestMetricsFuncCallCount;
+private int logRequestReadMetricsFuncCallCount;
 
 @Before
 public void beforeTest() throws Exception {
@@ -69,17 +72,7 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 public void testPhoenixMetricsLoggedOnCommit() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
-Statement stmt = loggedConn.createStatement();
-ResultSet rs = stmt.executeQuery(query);
-while (rs.next()) {
-}
-rs.close();
-assertTrue("Read metrics for not found for " + tableName1,
-requestReadMetricsMap.get(tableName1).size() > 0);
-assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
-
-assertTrue("Overall read metrics for not found ", 
overAllQueryMetricsMap.size() > 0);
-assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+verifyQueryLevelMetricsLogging(query);
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -117,17 +110,7 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 public void testPhoenixMetricsLoggedOnClose() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
-Statement stmt = loggedConn.createStatement();
-ResultSet rs = stmt.executeQuery(query);
-while (rs.next()) {
-}
-rs.close();
-assertTrue("Read metrics for not found for " + tableName1,
-requestReadMetricsMap.get(tableName1).size() > 0);
-assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
-
-assertTrue("Overall read metrics for not found ", 
overAllQueryMetricsMap.size() > 0);
-assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+verifyQueryLevelMetricsLogging(query);
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -151,6 +134,26 @@ public class PhoenixLoggingMetricsIT extends 
BasePhoenixMetricsIT {
 mutationReadMetricsMap.size() == 0);
 }
 
+private void verifyQueryLevelMetricsLogging(String query) throws 
SQLException {
+Statement stmt = loggedConn.createStatement();
+ResultSet rs = stmt.executeQuery(query);
+while (rs.next()) {
+}
+rs.close();
+assertTrue("Read metrics for not found for " + tableName1,
+requestReadMetricsMap.get(tableName1).size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+assertTrue(logOverAllReadRequestMetricsFuncCallCount == 1);
+assertTrue(logRequestReadMetricsFuncCallCount == 1);
+
+assertTrue("Overall read metrics for not found ", 
overAllQueryMetricsMap.size() > 0);
+assertTrue("Logged query doesn't match actual query", 
loggedSql.equals(query));
+
+rs.close();
+assertTrue(logOverAllReadRequestMetricsFuncCallCount == 1);
+assertTrue(logRequestReadMetricsFuncCallCount 
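
The assertions above pin the callback counts at exactly one even though rs.close() runs twice; that is the idempotence this commit adds. The guard can be as small as a compare-and-set flag, sketched here with illustrative names:

    import java.util.concurrent.atomic.AtomicBoolean;

    public class IdempotentMetricsLogger {
        private final AtomicBoolean logged = new AtomicBoolean(false);

        // Emit metrics exactly once, no matter how many times close() or an
        // explicit logging call is invoked.
        void logMetricsIfNeeded(Runnable emit) {
            if (logged.compareAndSet(false, true)) {
                emit.run();
            }
        }
    }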

[phoenix] 21/34: PHOENIX-5080 Index becomes Active during Partial Index Rebuilder if Index Failure happens

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 5e70e1889ba9fb1755099d5020f8807b3b670bf4
Author: Monani Mihir 
AuthorDate: Sat Feb 2 11:00:19 2019 +0530

PHOENIX-5080 Index becomes Active during Partial Index Rebuilder if Index 
Failure happens
---
 .../end2end/index/PartialIndexRebuilderIT.java |  66 +++-
 .../coprocessor/BaseScannerRegionObserver.java |   9 +-
 .../UngroupedAggregateRegionObserver.java  |  25 ++-
 .../org/apache/phoenix/execute/MutationState.java  |  14 +-
 .../org/apache/phoenix/hbase/index/Indexer.java|  10 +-
 .../hbase/index/builder/IndexBuildManager.java |   8 +
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  32 +++-
 .../apache/phoenix/index/PhoenixIndexMetaData.java |   3 +-
 .../java/org/apache/phoenix/query/BaseTest.java| 185 +
 9 files changed, 330 insertions(+), 22 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index 46443e3..cda282b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -47,7 +47,6 @@ import 
org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
 import 
org.apache.phoenix.coprocessor.MetaDataRegionObserver.BuildIndexScheduleTask;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
-import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.execute.CommitException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -86,6 +85,7 @@ public class PartialIndexRebuilderIT extends 
BaseUniqueNamesOwnClusterIT {
 private static final long REBUILD_PERIOD = 5;
 private static final long REBUILD_INTERVAL = 2000;
 private static RegionCoprocessorEnvironment 
indexRebuildTaskRegionEnvironment;
+private static Boolean runRebuildOnce = true;
 
 
 @BeforeClass
@@ -125,6 +125,7 @@ public class PartialIndexRebuilderIT extends 
BaseUniqueNamesOwnClusterIT {
 runIndexRebuilderAsync(interval, cancel, 
Collections.singletonList(table));
 }
 private static void runIndexRebuilderAsync(final int interval, final 
boolean[] cancel, final List tables) {
+runRebuildOnce = true;
 Thread thread = new Thread(new Runnable() {
 @Override
 public void run() {
@@ -137,6 +138,8 @@ public class PartialIndexRebuilderIT extends 
BaseUniqueNamesOwnClusterIT {
 throw new RuntimeException(e);
 } catch (SQLException e) {
 LOG.error(e.getMessage(),e);
+} finally {
+runRebuildOnce = false;
 }
 }
 }
@@ -554,7 +557,7 @@ public class PartialIndexRebuilderIT extends 
BaseUniqueNamesOwnClusterIT {
 
 @Override
 public long currentTime() {
-return time;
+return time++;
 }
 }
 
@@ -1068,6 +1071,65 @@ public class PartialIndexRebuilderIT extends 
BaseUniqueNamesOwnClusterIT {
 }
 }
 
+@Test
+@Repeat(5)
+public void testIndexActiveIfRegionMovesWhileRebuilding() throws Throwable 
{
+final MyClock clock = new MyClock(1000);
+EnvironmentEdgeManager.injectEdge(clock);
+String schemaName = generateUniqueName();
+String tableName = generateUniqueName();
+String indexName = generateUniqueName();
+int nThreads = 5;
+int nRows = 50;
+int nIndexValues = 23;
+int batchSize = 200;
+final CountDownLatch doneSignal = new CountDownLatch(nThreads);
+boolean[] cancel = new boolean[1];
+
+final String fullTableName = SchemaUtil.getTableName(schemaName, 
tableName);
+final String fullIndexName = SchemaUtil.getTableName(schemaName, 
indexName);
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+try {
+conn.createStatement().execute("CREATE TABLE " + fullTableName
++ "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 INTEGER, "
++ "CONSTRAINT pk PRIMARY KEY (k1,k2)) STORE_NULLS=true, 
VERSIONS=1");
+conn.createStatement().execute("CREATE INDEX " + indexName + " 
ON "
++ fullTableName + "(v1)");
+conn.commit();
+long disableTS = clock.currentTime();
+HTableInterface metaTable = 
conn.unwrap(PhoenixConnection.c
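
The MyClock change above (return time++) makes every read advance the injected clock, so timestamp ties cannot occur in the test. A self-contained sketch of the same injectable-clock pattern using the Phoenix utilities the test imports:

    import org.apache.phoenix.util.EnvironmentEdge;
    import org.apache.phoenix.util.EnvironmentEdgeManager;

    public class ManualClockSketch {
        // Every call to currentTime() advances the clock by one millisecond,
        // guaranteeing strictly increasing timestamps within the test.
        static class ManualClock extends EnvironmentEdge {
            private long time;
            ManualClock(long start) { this.time = start; }
            @Override
            public long currentTime() { return time++; }
        }

        public static void main(String[] args) {
            EnvironmentEdgeManager.injectEdge(new ManualClock(1000));
            long t1 = EnvironmentEdgeManager.currentTimeMillis();
            long t2 = EnvironmentEdgeManager.currentTimeMillis();
            System.out.println(t1 + " then " + t2); // prints 1000 then 1001
        }
    }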

[phoenix] 32/34: PHOENIX-5194 Thread Cache is not update for Index retries in for MutationState#send()#doMutation()

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit a0b6abaa3e1d0ee667cd1ed063e9833a0efaa0be
Author: Monani Mihir 
AuthorDate: Fri Apr 12 21:48:45 2019 +0530

PHOENIX-5194 Thread Cache is not updated for Index retries in 
MutationState#send()#doMutation()
---
 .../org/apache/phoenix/execute/MutationState.java  | 45 ++
 .../phoenix/index/PhoenixIndexFailurePolicy.java   | 10 -
 2 files changed, 45 insertions(+), 10 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 33cd596..1cbc4bc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -41,6 +41,7 @@ import javax.annotation.concurrent.Immutable;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -220,7 +221,7 @@ public class MutationState implements SQLCloseable {
  * Commit a write fence when creating an index so that we can detect when 
a data table transaction is started before
  * the create index but completes after it. In this case, we need to rerun 
the data table transaction after the
  * index creation so that the index rows are generated. See TEPHRA-157 for 
more information.
- * 
+ *
  * @param dataTable
  *the data table upon which an index is being added
  * @throws SQLException
@@ -445,7 +446,7 @@ public class MutationState implements SQLCloseable {
 /**
  * Combine a newer mutation with this one, where in the event of overlaps, 
the newer one will take precedence.
  * Combine any metrics collected for the newer mutation.
- * 
+ *
  * @param newMutationState
  *the newer mutation state
  */
@@ -500,8 +501,8 @@ public class MutationState implements SQLCloseable {
 final MultiRowMutationState values, final long mutationTimestamp, 
final long serverTimestamp,
 boolean includeAllIndexes, final boolean sendAll) {
 final PTable table = tableRef.getTable();
-final List indexList = includeAllIndexes ? 
-
Lists.newArrayList(IndexMaintainer.maintainedIndexes(table.getIndexes().iterator()))
 : 
+final List indexList = includeAllIndexes ?
+
Lists.newArrayList(IndexMaintainer.maintainedIndexes(table.getIndexes().iterator()))
 :
 IndexUtil.getClientMaintainedIndexes(table);
 final Iterator indexes = indexList.iterator();
 final List mutationList = 
Lists.newArrayListWithExpectedSize(values.size());
@@ -648,7 +649,7 @@ public class MutationState implements SQLCloseable {
 
 /**
  * Get the unsorted list of HBase mutations for the tables with 
uncommitted data.
- * 
+ *
  * @return list of HBase mutations for uncommitted data.
  */
 public Iterator<Pair<byte[], List<Mutation>>> toMutations(Long timestamp) {
@@ -730,7 +731,7 @@ public class MutationState implements SQLCloseable {
 /**
  * Validates that the meta data is valid against the server meta data if 
we haven't yet done so. Otherwise, for
  * every UPSERT VALUES call, we'd need to hit the server to see if the 
meta data has changed.
- * 
+ *
  * @return the server time to use for the upsert
  * @throws SQLException
  * if the table or any columns no longer exist
@@ -953,7 +954,7 @@ public class MutationState implements SQLCloseable {
 TableRef origTableRef = tableInfo.getOrigTableRef();
 PTable table = origTableRef.getTable();
 table.getIndexMaintainers(indexMetaDataPtr, connection);
-final ServerCache cache = tableInfo.isDataTable() ? 
+final ServerCache cache = tableInfo.isDataTable() ?
 
IndexMetaDataCacheClient.setMetaDataOnMutations(connection, table,
 mutationList, indexMetaDataPtr) : null;
 // If we haven't retried yet, retry for this case only, as 
it's possible that
@@ -982,7 +983,10 @@ public class MutationState implements SQLCloseable {
 for (final List mutationBatch : 
mutationBatchList) {
 if (shouldRetryIndexedMutation) {
 // if there was an index write failure, retry 
the mutation in a loop
-final HTableInterface finalHTable = 
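
The hunk is cut off here, but the added org.apache.hadoop.hbase.client.Table import above hints at the shape of the fix: on an index write failure the batch is retried with a table handle that is re-resolved on each attempt, so a stale cached reference is not reused. A hypothetical sketch of that retry shape, not the actual MutationState code (writeWithRetries and freshTable are illustrative names; assumes maxAttempts >= 1):

import java.io.IOException;
import java.util.List;
import java.util.function.Supplier;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;

class RetryWriteSketch {
    // Re-resolve the table handle on every attempt instead of holding one
    // reference for the whole loop; a failure on attempt N then cannot
    // poison attempt N+1 through a stale cache entry.
    static void writeWithRetries(Supplier<Table> freshTable,
            List<Mutation> batch, int maxAttempts) throws IOException {
        IOException lastFailure = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try (Table table = freshTable.get()) { // fresh handle per attempt
                table.batch(batch, new Object[batch.size()]);
                return;
            } catch (IOException | InterruptedException e) {
                lastFailure = e instanceof IOException
                        ? (IOException) e : new IOException(e);
            }
        }
        throw lastFailure;
    }
}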

[phoenix] 23/34: PHOENIX-5025 Tool to clean up orphan views (addendum)

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit d2e70b6756fc4fbe019334376886202e85e02bf7
Author: Kadir 
AuthorDate: Wed Dec 12 17:53:38 2018 -0800

PHOENIX-5025 Tool to clean up orphan views (addendum)
---
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 25 +++---
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 89 +-
 2 files changed, 71 insertions(+), 43 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
index f9a1785..38d4afc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME;
-import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
@@ -27,9 +26,9 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
+import java.io.FileReader;
 import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Paths;
+import java.io.LineNumberReader;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -54,6 +53,7 @@ import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class OrphanViewToolIT extends ParallelStatsDisabledIT {
+private static final String SYSTEM_CHILD_LINK_NAME = SYSTEM_CATALOG_NAME;
 private static final Logger LOG = 
LoggerFactory.getLogger(OrphanViewToolIT.class);
 
 private final boolean isMultiTenant;
@@ -206,9 +206,13 @@ public class OrphanViewToolIT extends 
ParallelStatsDisabledIT {
 }
 
 private void verifyLineCount(String fileName, long lineCount) throws 
IOException {
-if (Files.lines(Paths.get(fileName)).count() != lineCount)
-LOG.debug(Files.lines(Paths.get(fileName)).count() + " != " + 
lineCount);
-assertTrue(Files.lines(Paths.get(fileName)).count() == lineCount);
+LineNumberReader reader = new LineNumberReader(new 
FileReader(fileName));
+while (reader.readLine() != null) {
+}
+int count = reader.getLineNumber();
+if (count != lineCount)
+LOG.debug(count + " != " + lineCount);
+assertTrue(count == lineCount);
 }
 
 private void verifyCountQuery(Connection connection, String query, String 
schemaName, long count)
@@ -238,7 +242,6 @@ public class OrphanViewToolIT extends 
ParallelStatsDisabledIT {
 }
 }
 
-
 private void verifyNoChildLink(Connection connection, String 
viewSchemaName) throws Exception {
 // Verify that there is no link in the system child link table
 verifyCountQuery(connection, countChildLinksQuery, viewSchemaName, 0);
@@ -264,6 +267,7 @@ public class OrphanViewToolIT extends 
ParallelStatsDisabledIT {
 schemaName == null ? "IS NULL" : "= '" + schemaName + "'"));
 connection.commit();
 }
+
 @Test
 public void testDeleteBaseTableRows() throws Exception {
 String baseTableName = generateUniqueName();
@@ -438,7 +442,8 @@ public class OrphanViewToolIT extends 
ParallelStatsDisabledIT {
 }
 }
 
-public static String[] getArgValues(boolean clean, boolean identify, 
boolean outputPath, boolean inputPath) {
+public static String[] getArgValues(boolean clean, boolean identify, 
boolean outputPath, boolean inputPath)
+throws InterruptedException{
 final List args = Lists.newArrayList();
 if (outputPath) {
 args.add("-op");
@@ -454,8 +459,10 @@ public class OrphanViewToolIT extends 
ParallelStatsDisabledIT {
 if (identify) {
 args.add("-i");
 }
+final long ageMs = 2000;
+Thread.sleep(ageMs);
 args.add("-a");
-args.add("0");
+args.add(Long.toString(ageMs));
 return args.toArray(new String[0]);
 }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index a8a30b6..2e0dd0d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -
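
One caveat worth noting in the addendum: the rewritten verifyLineCount never closes its LineNumberReader. An illustrative leak-free equivalent of the same count, using try-with-resources:

import java.io.FileReader;
import java.io.IOException;
import java.io.LineNumberReader;

class LineCountSketch {
    // Same count as the test helper above, with the reader closed
    // deterministically even if readLine() throws.
    static int countLines(String fileName) throws IOException {
        try (LineNumberReader reader = new LineNumberReader(new FileReader(fileName))) {
            while (reader.readLine() != null) {
                // consume lines; getLineNumber() tracks how many were read
            }
            return reader.getLineNumber();
        }
    }
}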

[phoenix] branch 4.14-HBase-1.4 updated (15bc250 -> 3d543f1)

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 15bc250  PHOENIX-5173: LIKE and ILIKE statements return empty result 
list for search without wildcard
 new 5b82f54  PHOENIX-5008: CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
 new 75f01d9  PHOENIX-5008 (Addendum): CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
 new c5a1f04  PHOENIX-5005 Server-side delete / upsert-select potentially 
blocked after a split
 new dae5b09  PHOENIX-4750 Resolve server customizers and provide them to 
Avatica
 new 0a8d170  PHOENIX-4755 Provide an option to plugin custom avatica 
server config in PQS
 new 9b21dab  PHOENIX-3991 ROW_TIMESTAMP on TIMESTAMP column type throws 
ArrayOutOfBound when upserting without providing a value.
 new dd5725c  PHOENIX-4834 PhoenixMetricsLog interface methods should not 
depend on specific logger
 new 4241f57  PHOENIX-4835 LoggingPhoenixConnection should log metrics upon 
connection close
 new 58e21cc  PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface 
for query level metrics logging
 new c6df225  PHOENIX-4854 Make LoggingPhoenixResultSet idempotent when 
logging metrics
 new 7094a0c  PHOENIX-4864 Fix NullPointerException while Logging some DDL 
Statements
 new 1961a97  PHOENIX-4870 LoggingPhoenixConnection should log metrics when 
AutoCommit is set to True.
 new 9e51efa  PHOENIX-4989 Include disruptor jar in shaded dependency
 new c5396f0  PHOENIX-4781 Create artifact jar so that shaded jar replaces 
it properly
 new 16ec10c  PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE 
timestamp check for all index
 new ed2d361  PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 
4.14.1 with hbase-1.x branch in secure setup
 new 6c9aee1  PHOENIX-5111: Null Pointer exception fix in index tool due to 
outputpath being null when direct option is supplied
 new b853be9  PHOENIX-5094 increment pending disable count for index when 
rebuild starts
 new 9bcd7c6  PHOENIX-4993 close cache connections when region server is 
going down
 new dcb8b7e  Add tenantId param to IndexTool
 new 5e70e18  PHOENIX-5080 Index becomes Active during Partial Index 
Rebuilder if Index Failure happens
 new e907249  PHOENIX-5025 Tool to clean up orphan views
 new d2e70b6  PHOENIX-5025 Tool to clean up orphan views (addendum)
 new 7d2aac1  PHOENIX-5247 DROP TABLE and DROP VIEW commands fail to drop 
second or higher level child views
 new 91988be  PHOENIX-5137 check region close before committing a batch for 
index rebuild
 new aa7b970  PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.
 new 30754af  PHOENIX-5172: Harden the PQS canary synth test tool with 
retry mechanism and more logging
 new 0eeb6c9  PHOENIX-5188 - IndexedKeyValue should populate KeyValue fields
 new 019aa1e  PHOENIX-5124 PropertyPolicyProvider should not evaluate 
default hbase config properties
 new 1777c54  PHOENIX-4822 Ensure the provided timezone is used client-side 
(Jaanai Zhang)
 new e8522d6  PHOENIX-4822 Fixed Spelling.
 new a0b6aba  PHOENIX-5194 Thread Cache is not updated for Index retries in 
MutationState#send()#doMutation()
 new 474150e  PHOENIX-5018 Index mutations created by UPSERT SELECT will 
have wrong timestamps
 new 3d543f1  PHOENIX-5184: HBase and Phoenix connection leaks in Indexing 
code path, OrphanViewTool and PhoenixConfigurationUtil

The 34 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 phoenix-client/pom.xml |   9 +-
 phoenix-core/pom.xml   |   7 +
 .../apache/phoenix/end2end/AppendOnlySchemaIT.java |   2 +-
 .../apache/phoenix/end2end/BasePermissionsIT.java  |   4 +-
 .../org/apache/phoenix/end2end/DateTimeIT.java |  77 ++
 .../phoenix/end2end/IndexBuildTimestampIT.java | 246 ++
 .../org/apache/phoenix/end2end/IndexToolIT.java| 150 +++-
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 479 +++
 .../org/apache/phoenix/end2end/RowTimestampIT.java |  26 +-
 .../SystemCatalogCreationOnConnectionIT.java   | 121 ++-
 .../phoenix/end2end/TableDDLPermissionsIT.java |   8 -
 .../it/java/org/apache/phoenix/end2end/ViewIT.java |   2 +-
 .../index/IndexRebuildIncrementDisableCountIT.java | 237 ++
 .../end2end/index/PartialIndexRebuilderIT.java |  66 +-
 .../phoenix/monitoring/BasePhoenixMetricsIT.java   | 128 +++
 .../monitoring/PhoenixLoggingMetricsIT.java| 290 +++
 .../phoenix/monitoring/P

[phoenix] 25/34: PHOENIX-5137 check region close before committing a batch for index rebuild

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 91988be2055423b5623576bbd1fdab4ea9e75d86
Author: Kiran Kumar Maturi 
AuthorDate: Fri Feb 22 09:45:13 2019 +0530

PHOENIX-5137 check region close before committing a batch for index rebuild
---
 .../UngroupedAggregateRegionObserver.java  | 30 +-
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 703ff97..2eb15a1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -260,7 +260,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
   return;
   }
 
-Mutation[] mutationArray = new Mutation[mutations.size()];
+   Mutation[] mutationArray = new Mutation[mutations.size()];
   // When memstore size reaches blockingMemstoreSize we are waiting 3 
seconds for the
   // flush happen which decrease the memstore size and then writes allowed 
on the region.
   for (int i = 0; blockingMemstoreSize > 0 && region.getMemstoreSize() > 
blockingMemstoreSize && i < 30; i++) {
@@ -371,6 +371,17 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 super.clear();
 }
 }
+
+   private long getBlockingMemstoreSize(Region region, Configuration conf) {
+   long flushSize = region.getTableDesc().getMemStoreFlushSize();
+
+   if (flushSize <= 0) {
+   flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
+   HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
+   }
+   return flushSize * 
(conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
+   HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1);
+   }
 
 @Override
 protected RegionScanner doPostScannerOpen(final 
ObserverContext c, final Scan scan, final 
RegionScanner s) throws IOException, SQLException {
@@ -487,12 +498,6 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 MutationList mutations = new MutationList();
 boolean needToWrite = false;
 Configuration conf = env.getConfiguration();
-long flushSize = region.getTableDesc().getMemStoreFlushSize();
-
-if (flushSize <= 0) {
-flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
-HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
-}
 
 /**
  * Slow down the writes if the memstore size more than
@@ -500,9 +505,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
  * bytes. This avoids flush storm to hdfs for cases like index 
building where reads and
  * write happen to all the table regions in the server.
  */
-final long blockingMemStoreSize = flushSize * (
-conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
-
HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
+final long blockingMemStoreSize = getBlockingMemstoreSize(region, 
conf) ;
 
 boolean buildLocalIndex = indexMaintainers != null && 
dataColumns==null && !localIndexScan;
 if(buildLocalIndex) {
@@ -1043,6 +1046,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 int maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 long maxBatchSizeBytes = 
config.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
 QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
+final long blockingMemstoreSize = getBlockingMemstoreSize(region, 
config);
 MutationList mutations = new MutationList(maxBatchSize);
 region.startRegionOperation();
 byte[] uuidValue = ServerCacheClient.generateId();
@@ -1084,7 +1088,8 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 }
 if (ServerUtil.readyToCommit(mutations.size(), 
mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
-commitBatchWithRetries(region, mutations, -1);
+checkForRegionClosingOrSplitting();
+commitBatchWithRetries(region, mutations, 
blockingMemstoreSize);
 uuidValue = ServerCacheClient.generateId(
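
To make the extracted getBlockingMemstoreSize helper concrete: with stock HBase 1.x defaults (a 128 MB memstore flush size and a block multiplier of 4, assuming no site-level overrides), the arithmetic works out as below. Passing that value to commitBatchWithRetries, instead of the previous -1, lets the rebuild path apply the same back-off as the scan path.

// Illustrative arithmetic only, using the stock defaults named above.
class BlockingMemstoreMath {
    public static void main(String[] args) {
        long flushSize = 128L * 1024 * 1024; // default memstore flush size, bytes
        long blockMultiplier = 4L;           // default memstore block multiplier
        long blockingMemstoreSize = flushSize * (blockMultiplier - 1);
        System.out.println(blockingMemstoreSize); // 402653184 bytes = 384 MB
    }
}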

[phoenix] 14/34: PHOENIX-4781 Create artifact jar so that shaded jar replaces it properly

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit c5396f07b5f510327ca53738a7c08a51eb57c1a6
Author: Vincent Poon 
AuthorDate: Fri Nov 30 17:55:34 2018 -0800

PHOENIX-4781 Create artifact jar so that shaded jar replaces it properly
---
 phoenix-client/pom.xml | 9 +++--
 phoenix-server/pom.xml | 9 +++--
 2 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index a99194c..bcf0022 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -57,12 +57,9 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>default-jar</id>
-            <phase>none</phase>
-          </execution>
-        </executions>
+        <configuration>
+          <finalName>phoenix-${project.version}-client</finalName>
+        </configuration>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 39bd8b9..5a84acc 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -61,12 +61,9 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>default-jar</id>
-            <phase>none</phase>
-          </execution>
-        </executions>
+        <configuration>
+          <finalName>phoenix-${project.version}-server</finalName>
+        </configuration>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>



[phoenix] 31/34: PHOENIX-4822 Fixed Spelling.

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit e8522d6b7d47ddee57b478fef5d3d31ed5806a07
Author: Jimmy Casey 
AuthorDate: Sun Jul 29 21:43:55 2018 +

PHOENIX-4822 Fixed Spelling.

Closes #318

Signed-off-by: Josh Elser 
---
 phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java
index dab03e7..a7a2180 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java
@@ -21,7 +21,7 @@ import java.io.IOException;
 import java.sql.SQLException;
 
 /**
- * Used by the event handler to write RingBufferEvent, this is done in a 
seperate thread from the application configured
+ * Used by the event handler to write RingBufferEvent, this is done in a 
separate thread from the application configured
  * during disruptor
  */
 public interface LogWriter {



[phoenix] 16/34: PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 4.14.1 with hbase-1.x branch in secure setup

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit ed2d361ca2b76d37c15c93ba0333cc03d45c6fe3
Author: Monani Mihir 
AuthorDate: Fri Dec 14 16:20:17 2018 +0530

PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 4.14.1 with 
hbase-1.x branch in secure setup
---
 .../java/org/apache/phoenix/coprocessor/PhoenixAccessController.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 0db2801..a7026dc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -406,7 +406,7 @@ public class PhoenixAccessController extends 
BaseMetaDataEndpointObserver {
 final List userPermissions = new 
ArrayList();
 try (Connection connection = 
ConnectionFactory.createConnection(env.getConfiguration())) {
 // Merge permissions from all accessController 
coprocessors loaded in memory
-for (BaseMasterAndRegionObserver service : 
accessControllers) {
+for (BaseMasterAndRegionObserver service : 
getAccessControllers()) {
 // Use AccessControlClient API's if the 
accessController is an instance of 
org.apache.hadoop.hbase.security.access.AccessController
 if 
(service.getClass().getName().equals(org.apache.hadoop.hbase.security.access.AccessController.class.getName()))
 {
 
userPermissions.addAll(AccessControlClient.getUserPermissions(connection, 
tableName.getNameAsString()));
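
The one-line fix swaps a direct read of the accessControllers field for its accessor: the field is populated lazily, so reading it directly can observe null during upgrade. A hypothetical sketch of the underlying pattern (LazyControllers and its members are illustrative, not the PhoenixAccessController code):

import java.util.ArrayList;
import java.util.List;

class LazyControllers {
    private volatile List<Object> controllers;

    List<Object> getControllers() {
        if (controllers == null) {                   // fast path
            synchronized (this) {
                if (controllers == null) {           // double-checked init
                    controllers = new ArrayList<>(); // stand-in for coprocessor discovery
                }
            }
        }
        return controllers;
    }
}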



[phoenix] 20/34: Add tenantId param to IndexTool

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit dcb8b7e2b18830370b63fa43809be722aca1b308
Author: Gokcen Iskender 
AuthorDate: Mon Feb 11 12:58:53 2019 -0800

Add tenantId param to IndexTool

Signed-off-by: Geoffrey Jacoby 
---
 .../org/apache/phoenix/end2end/IndexToolIT.java| 113 -
 .../apache/phoenix/mapreduce/index/IndexTool.java  |  43 +---
 2 files changed, 139 insertions(+), 17 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
index a120aaa..9d6f881 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
@@ -40,15 +40,19 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.index.IndexTool;
+import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -73,14 +77,16 @@ public class IndexToolIT extends ParallelStatsEnabledIT {
 private final String tableDDLOptions;
 private final boolean mutable;
 private final boolean useSnapshot;
+private final boolean useTenantId;
 
 public IndexToolIT(boolean transactional, boolean mutable, boolean 
localIndex,
-boolean directApi, boolean useSnapshot) {
+boolean directApi, boolean useSnapshot, boolean useTenantId) {
 this.localIndex = localIndex;
 this.transactional = transactional;
 this.directApi = directApi;
 this.mutable = mutable;
 this.useSnapshot = useSnapshot;
+this.useTenantId = useTenantId;
 StringBuilder optionBuilder = new StringBuilder();
 if (!mutable) {
 optionBuilder.append(" IMMUTABLE_ROWS=true ");
@@ -117,12 +123,14 @@ public class IndexToolIT extends ParallelStatsEnabledIT {
 for (boolean localIndex : Booleans) {
 for (boolean directApi : Booleans) {
 for (boolean useSnapshot : Booleans) {
-list.add(new Boolean[] { transactional, mutable, 
localIndex, directApi, useSnapshot });
+list.add(new Boolean[] { transactional, mutable, 
localIndex, directApi, useSnapshot, false});
 }
 }
 }
 }
 }
+// Add the useTenantId case
+list.add(new Boolean[] { false, false, false, true, false, true});
 return list;
 }
 
@@ -221,6 +229,90 @@ public class IndexToolIT extends ParallelStatsEnabledIT {
 }
 
 @Test
+public void testIndexToolWithTenantId() throws Exception {
+if (!useTenantId) { return;}
+String tenantId = generateUniqueName();
+String schemaName = generateUniqueName();
+String dataTableName = generateUniqueName();
+String viewTenantName = generateUniqueName();
+String indexNameGlobal = generateUniqueName();
+String indexNameTenant = generateUniqueName();
+String viewIndexTableName = "_IDX_" + dataTableName;
+
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection connGlobal = DriverManager.getConnection(getUrl(), props);
+props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+Connection connTenant = DriverManager.getConnection(getUrl(), props);
+String createTblStr = "CREATE TABLE %s (TENANT_ID VARCHAR(15) NOT 
NULL,ID INTEGER NOT NULL"
++ ", NAME VARCHAR, CONSTRAINT PK_1 PRIMARY KEY (TENANT_ID, 
ID)) MULTI_TENANT=true";
+String createViewStr = "CREATE VIEW %s AS SELECT * FROM %s";
+
+String upsertQueryStr = "UPSERT INTO %s (TENANT_ID, ID, NAME) 
VALUES('%s' , %d, '%s')";
+String createIndexStr = "CREATE INDEX %s ON %s (NAME) ";
+
+try {
+String tableStmtGlobal = String.format(createTblStr, 
dataTableName);
+connGl
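
For readers unfamiliar with the tenant plumbing the new test exercises: a tenant-scoped connection is an ordinary Phoenix connection with the TenantId property set before connecting. A minimal sketch (the URL and tenant name are illustrative; the test derives its URL from the mini-cluster):

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

import org.apache.phoenix.util.PhoenixRuntime;

public class TenantConnectionSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:phoenix:localhost"; // illustrative
        Properties props = new Properties();
        try (Connection global = DriverManager.getConnection(url, props)) {
            // the global connection creates the MULTI_TENANT base table
        }
        // Same props plus TenantId yields a tenant-scoped connection that
        // only sees that tenant's rows, views, and view indexes.
        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "tenant1");
        try (Connection tenant = DriverManager.getConnection(url, props)) {
            // the tenant connection creates the tenant view and its index
        }
    }
}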

[phoenix] 02/34: PHOENIX-5008 (Addendum): CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 75f01d937cd6207482623b7832a214d911464798
Author: Chinmay Kulkarni 
AuthorDate: Tue Nov 13 17:11:53 2018 -0800

PHOENIX-5008 (Addendum): CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
---
 .../org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index 0cd206e..59af533 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -500,7 +500,7 @@ public class SystemCatalogCreationOnConnectionIT {
  */
 private Set getHBaseTables() throws IOException {
 Set tables = new HashSet<>();
-for (TableName tn : testUtil.getAdmin().listTableNames()) {
+for (TableName tn : testUtil.getHBaseAdmin().listTableNames()) {
 tables.add(tn.getNameAsString());
 }
 return tables;



[phoenix] 19/34: PHOENIX-4993 close cache connections when region server is going down

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 9bcd7c665883356cc74956c7a3d115d87a04c6b4
Author: Kiran Kumar Maturi 
AuthorDate: Thu Jan 17 10:32:49 2019 +0530

PHOENIX-4993 close cache connections when region server is going down
---
 .../java/org/apache/phoenix/util/ServerUtil.java   |  12 ++-
 .../CoprocessorHConnectionTableFactoryTest.java| 119 +
 2 files changed, 130 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 09701c5..a8170ce 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -316,7 +316,10 @@ public class ServerUtil {
 
 @Override
 public void shutdown() {
-ConnectionFactory.shutdown();
+  // close the connections when region server is going down
+  if (this.server.isStopping() || this.server.isStopped() || 
this.server.isAborted()) {
+ConnectionFactory.shutdown();
+  }
 }
 
 @Override
@@ -383,6 +386,13 @@ public class ServerUtil {
 return conf;
 }
 }
+
+/**
+ * Added for testing
+ */
+public static int getConnectionsCount() {
+  return connections.size();
+}
 }
 
 public static Configuration getCompactionConfig(Configuration conf) {
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
new file mode 100644
index 0000000..a757780
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
@@ -0,0 +1,119 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless 
required by applicable
+ * law or agreed to in writing, software distributed under the License is 
distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. See the License
+ * for the specific language governing permissions and limitations under the 
License.
+ */
+
+package org.apache.phoenix.util;
+
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/*
+ * This test relates to https://issues.apache.org/jira/browse/PHOENIX-4993. It checks that:
+ * 1. region close should not close the shared connections
+ * 2. region server close should close the shared connections
+ */
+public class CoprocessorHConnectionTableFactoryTest extends 
BaseUniqueNamesOwnClusterIT {
+  private static String ORG_PREFIX = "ORG";
+  private static final Log LOG = 
LogFactory.getLog(CoprocessorHConnectionTableFactoryTest.class);
+
+  @BeforeClass
+  public static final void doSetup() throws Exception {
+
+setUpTestDriver(ReadOnlyProps.EMPTY_PROPS);
+
+  }
+
+  static String getOrgId(long id) {
+return ORG_PREFIX + "-" + id;
+  }
+
+  static String getRandomOrgId(int maxOrgId) {
+return getOrgId(Math.round(Math.random() * maxOrgId));
+  }
+
+  static void writeToTable(String tableName, Connection conn, int maxOrgId) 
throws SQLException {
+try {
+
+  String orgId = getRandomOrgId(maxOrgId);
+  Statement stmt = conn.createStatement();
+  for (int i = 0; i < 10; i++) {
+stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES('" + orgId + 
"'," + i + ","
++ (i + 1) + "," + (i + 2) + ")");
+
+  }
+  conn.commit();
+} catch (Exception e) {
+  LOG.error("Client side ex
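
The guard added to shutdown() is the heart of the fix: the shared connections are owned by the region server, so a single region close must leave them alone. A hypothetical sketch of that guard (ServerLifecycle and SharedConnectionShutdown are illustrative names):

interface ServerLifecycle {
    boolean isStopping();
    boolean isStopped();
    boolean isAborted();
}

class SharedConnectionShutdown {
    void shutdown(ServerLifecycle server) {
        // Close the shared connections only when the whole region server is
        // going down, not on every region close.
        if (server.isStopping() || server.isStopped() || server.isAborted()) {
            closeSharedConnections(); // stand-in for ConnectionFactory.shutdown()
        }
    }

    private void closeSharedConnections() {
        // close and clear the cached connections here
    }
}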

[phoenix] 04/34: PHOENIX-4750 Resolve server customizers and provide them to Avatica

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit dae5b0908cc172230026f50b956a1cc1dedb9ff6
Author: Alex Araujo 
AuthorDate: Mon Jun 4 16:32:10 2018 -0700

PHOENIX-4750 Resolve server customizers and provide them to Avatica

Resolve server customizers on the PQS classpath and provide them to the
HttpServer builder.

Signed-off-by: Josh Elser 
---
 .../org/apache/phoenix/query/QueryServices.java|   1 +
 .../apache/phoenix/query/QueryServicesOptions.java |   1 +
 .../phoenix/end2end/QueryServerTestUtil.java   | 187 +
 .../phoenix/end2end/ServerCustomizersIT.java   | 147 
 .../phoenix/queryserver/server/QueryServer.java|  26 ++-
 .../server/ServerCustomizersFactory.java   |  49 ++
 .../queryserver/server/ServerCustomizersTest.java  |  87 ++
 7 files changed, 496 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 48b7b7f..c7548df 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -252,6 +252,7 @@ public interface QueryServices extends SQLCloseable {
 public static final String QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB = 
"phoenix.queryserver.spnego.auth.disabled";
 public static final String QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB = 
"phoenix.queryserver.withRemoteUserExtractor";
 public static final String QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = 
"phoenix.queryserver.remoteUserExtractor.param";
+public static final String QUERY_SERVER_CUSTOMIZERS_ENABLED = 
"phoenix.queryserver.customizers.enabled";
 public static final String QUERY_SERVER_DISABLE_KERBEROS_LOGIN = 
"phoenix.queryserver.disable.kerberos.login";
 
 // metadata configs
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 307c5dd..7933ba0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -301,6 +301,7 @@ public class QueryServicesOptions {
 public static final boolean DEFAULT_QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR 
= false;
 public static final String DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM 
= "doAs";
 public static final boolean DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN = 
false;
+public static final boolean DEFAULT_QUERY_SERVER_CUSTOMIZERS_ENABLED = 
false;
 
 public static final boolean DEFAULT_RENEW_LEASE_ENABLED = true;
 public static final int 
DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS =
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java
new file mode 100644
index 0000000..01f73ae
--- /dev/null
+++ b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import java.io.File;
+import java.security.PrivilegedAction;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.phoenix.query.ConfigurationFactory;
+import org.apache.phoenix.queryserver.client.ThinClientUtil;
+import org.apache.phoenix.queryserver.server.QuerySe
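
The two constants added to QueryServices and QueryServicesOptions follow Phoenix's usual key-plus-default pairing, so reading the gate is a single Configuration lookup. A minimal sketch (only the two constant names come from the diff; the surrounding method is illustrative):

import org.apache.hadoop.conf.Configuration;

import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;

class CustomizerGate {
    // Reads the feature gate with its shipped default (false).
    static boolean customizersEnabled(Configuration conf) {
        return conf.getBoolean(
                QueryServices.QUERY_SERVER_CUSTOMIZERS_ENABLED,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_CUSTOMIZERS_ENABLED);
    }
}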

[phoenix] 01/34: PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 5b82f5431d6be4baf8a95472a01d8715b769c9ee
Author: Chinmay Kulkarni 
AuthorDate: Fri Nov 9 19:22:57 2018 -0800

PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to 
client in case of an UpgradeRequiredException
---
 .../SystemCatalogCreationOnConnectionIT.java   | 123 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |   4 +-
 2 files changed, 101 insertions(+), 26 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index 689eb20..0cd206e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -17,6 +17,24 @@
  */
 package org.apache.phoenix.end2end;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.apache.phoenix.query.BaseTest.generateUniqueName;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -26,6 +44,7 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.UpgradeRequiredException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.query.*;
@@ -36,14 +55,6 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.*;
-import java.util.concurrent.TimeoutException;
-
-import static org.junit.Assert.*;
-
 @Category(NeedsOwnMiniClusterTest.class)
 public class SystemCatalogCreationOnConnectionIT {
 private HBaseTestingUtility testUtil = null;
@@ -57,6 +68,12 @@ public class SystemCatalogCreationOnConnectionIT {
 private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG";
 private static final String EXECUTE_UPGRADE_COMMAND = "EXECUTE UPGRADE";
 private static final String MODIFIED_MAX_VERSIONS ="5";
+private static final String CREATE_TABLE_STMT = "CREATE TABLE %s"
++ " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY 
KEY(K1,K2))";
+private static final String SELECT_STMT = "SELECT * FROM %s";
+private static final String DELETE_STMT = "DELETE FROM %s";
+private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMY_IDX ON 
%s (K1) INCLUDE (K2)";
+private static final String UPSERT_STMT = "UPSERT INTO %s VALUES ('A', 
'B')";
 
 private static final Set PHOENIX_SYSTEM_TABLES = new 
HashSet<>(Arrays.asList(
   "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
@@ -155,12 +172,8 @@ public class SystemCatalogCreationOnConnectionIT {
 UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradePropSet);
 SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver 
driver =
   new 
SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
-try {
-driver.getConnectionQueryServices(getJdbcUrl(), 
propsDoNotUpgradePropSet);
-fail("Client should not be able to create SYSTEM.CATALOG since we 
set the doNotUpgrade property");
-} catch (Exception e) {
-assertTrue(e instanceof UpgradeRequiredException);
-}
+
+driver.getConnectionQueryServices(getJdbcUrl(), 
propsDoNotUpgradePropSet);
 hbaseTables = getHBaseTables();
 assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || 
hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
 assertTrue(hbaseTables.size() == 0);
@@ -416,6 +429,70 @@ public class SystemCatalogCreationOnConnectionIT {
 assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), 
verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG));
 }

[phoenix] 25/34: PHOENIX-5137 check region close before committing a batch for index rebuild

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit da6248a78c1861613cb43eb90c988eaf9457d36e
Author: Kiran Kumar Maturi 
AuthorDate: Fri Feb 22 09:45:13 2019 +0530

PHOENIX-5137 check region close before committing a batch for index rebuild
---
 .../UngroupedAggregateRegionObserver.java  | 30 +-
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 703ff97..2eb15a1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -260,7 +260,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
   return;
   }
 
-Mutation[] mutationArray = new Mutation[mutations.size()];
+   Mutation[] mutationArray = new Mutation[mutations.size()];
   // When memstore size reaches blockingMemstoreSize we are waiting 3 
seconds for the
   // flush happen which decrease the memstore size and then writes allowed 
on the region.
   for (int i = 0; blockingMemstoreSize > 0 && region.getMemstoreSize() > 
blockingMemstoreSize && i < 30; i++) {
@@ -371,6 +371,17 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 super.clear();
 }
 }
+
+   private long getBlockingMemstoreSize(Region region, Configuration conf) {
+   long flushSize = region.getTableDesc().getMemStoreFlushSize();
+
+   if (flushSize <= 0) {
+   flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
+   HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
+   }
+   return flushSize * 
(conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
+   HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1);
+   }
 
 @Override
 protected RegionScanner doPostScannerOpen(final 
ObserverContext c, final Scan scan, final 
RegionScanner s) throws IOException, SQLException {
@@ -487,12 +498,6 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 MutationList mutations = new MutationList();
 boolean needToWrite = false;
 Configuration conf = env.getConfiguration();
-long flushSize = region.getTableDesc().getMemStoreFlushSize();
-
-if (flushSize <= 0) {
-flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
-HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
-}
 
 /**
  * Slow down the writes if the memstore size more than
@@ -500,9 +505,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
  * bytes. This avoids flush storm to hdfs for cases like index 
building where reads and
  * write happen to all the table regions in the server.
  */
-final long blockingMemStoreSize = flushSize * (
-conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
-
HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
+final long blockingMemStoreSize = getBlockingMemstoreSize(region, 
conf) ;
 
 boolean buildLocalIndex = indexMaintainers != null && 
dataColumns==null && !localIndexScan;
 if(buildLocalIndex) {
@@ -1043,6 +1046,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 int maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 long maxBatchSizeBytes = 
config.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
 QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
+final long blockingMemstoreSize = getBlockingMemstoreSize(region, 
config);
 MutationList mutations = new MutationList(maxBatchSize);
 region.startRegionOperation();
 byte[] uuidValue = ServerCacheClient.generateId();
@@ -1084,7 +1088,8 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 }
 if (ServerUtil.readyToCommit(mutations.size(), 
mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
-commitBatchWithRetries(region, mutations, -1);
+checkForRegionClosingOrSplitting();
+commitBatchWithRetries(region, mutations, 
blockingMemstoreSize);
 uuidValue = ServerCacheClient.generateId(

[phoenix] 26/34: PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 272e2adb540a7b1238e6f7428f48bfe49424536c
Author: s.kadam 
AuthorDate: Wed Dec 5 16:11:07 2018 -0800

PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.
---
 phoenix-core/pom.xml   |   7 +
 .../org/apache/phoenix/tool/CanaryTestResult.java  |  86 
 .../org/apache/phoenix/tool/PhoenixCanaryTool.java | 477 +
 .../resources/phoenix-canary-file-sink.properties  |  17 +
 .../apache/phoenix/tool/PhoenixCanaryToolTest.java | 140 ++
 5 files changed, 727 insertions(+)

diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 2b6bddb..291abec 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -26,6 +26,7 @@
 
   <properties>
     <top.dir>${project.basedir}/..</top.dir>
+    <argparse4j.version>0.8.1</argparse4j.version>
   </properties>
 
   <build>
@@ -238,6 +239,12 @@
       <artifactId>sqlline</artifactId>
     </dependency>
     <dependency>
+      <groupId>net.sourceforge.argparse4j</groupId>
+      <artifactId>argparse4j</artifactId>
+      <version>${argparse4j.version}</version>
+    </dependency>
+
+    <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java b/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java
new file mode 100644
index 0000000..b72439c
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.tool;
+
+public class CanaryTestResult {
+
+private boolean isSuccessful;
+private long startTime;
+private long executionTime;
+private String message;
+private String testName;
+private String timestamp;
+private Object miscellaneous;
+
+public Object getMiscellaneous() {
+return miscellaneous;
+}
+
+public void setMiscellaneous(Object miscellaneous) {
+this.miscellaneous = miscellaneous;
+}
+
+public long getStartTime() {
+return startTime;
+}
+
+public void setStartTime(long startTime) {
+this.startTime = startTime;
+}
+
+public String getTimestamp() {
+return timestamp;
+}
+
+public void setTimestamp(String timestamp) {
+this.timestamp = timestamp;
+}
+
+public boolean isSuccessful() {
+return isSuccessful;
+}
+
+public void setSuccessful(boolean successful) {
+isSuccessful = successful;
+}
+
+public long getExecutionTime() {
+return executionTime;
+}
+
+public void setExecutionTime(long executionTime) {
+this.executionTime = executionTime;
+}
+
+public String getMessage() {
+return message;
+}
+
+public void setMessage(String message) {
+this.message = message;
+}
+
+public String getTestName() {
+return testName;
+}
+
+public void setTestName(String testName) {
+this.testName = testName;
+}
+
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
new file mode 100644
index 0000000..405f54f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
@@ -0,0 +1,477 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.tool;
+
+import co
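
CanaryTestResult above is a plain result bean: each canary step fills one in and hands it to a sink. An illustrative use (step name, timing, and message are made up):

import org.apache.phoenix.tool.CanaryTestResult;

public class CanaryResultSketch {
    public static void main(String[] args) {
        CanaryTestResult result = new CanaryTestResult();
        result.setTestName("upsert");                 // illustrative step name
        result.setStartTime(System.currentTimeMillis());
        // ... run the step here ...
        result.setExecutionTime(42L);                 // elapsed millis, illustrative
        result.setSuccessful(true);
        result.setMessage("upsert round-trip OK");
        System.out.println(result.getTestName() + " -> " + result.isSuccessful());
    }
}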

[phoenix] 34/34: PHOENIX-5184: HBase and Phoenix connection leaks in Indexing code path, OrphanViewTool and PhoenixConfigurationUtil

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit fc54cf39ad1fdc27db6bd39251b155d988d4895a
Author: Chinmay Kulkarni 
AuthorDate: Thu Mar 14 23:16:14 2019 -0700

PHOENIX-5184: HBase and Phoenix connection leaks in Indexing code path, 
OrphanViewTool and PhoenixConfigurationUtil
---
 .../UngroupedAggregateRegionObserver.java  |  6 ++-
 .../hbase/index/write/RecoveryIndexWriter.java | 10 ++--
 .../phoenix/mapreduce/AbstractBulkLoadTool.java| 15 ++
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 53 ++---
 .../phoenix/mapreduce/PhoenixRecordWriter.java | 18 +--
 .../mapreduce/index/DirectHTableWriter.java| 14 +-
 .../mapreduce/index/IndexScrutinyMapper.java   | 24 --
 .../apache/phoenix/mapreduce/index/IndexTool.java  | 55 --
 .../index/PhoenixIndexImportDirectMapper.java  | 26 +-
 .../mapreduce/index/PhoenixIndexImportMapper.java  | 16 ---
 .../index/PhoenixIndexPartialBuildMapper.java  | 25 ++
 .../mapreduce/util/PhoenixConfigurationUtil.java   | 45 +-
 .../apache/phoenix/parse/DropTableStatement.java   |  4 +-
 13 files changed, 190 insertions(+), 121 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 2eb15a1..f0ce5b2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -817,7 +817,11 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 try {
 if (targetHTable != null) {
-targetHTable.close();
+try {
+targetHTable.close();
+} catch (IOException e) {
+logger.error("Closing table: " + targetHTable + " 
failed: ", e);
+}
 }
 } finally {
 try {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index 35f0a6d..fb9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -26,8 +26,6 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -55,15 +53,13 @@ public class RecoveryIndexWriter extends IndexWriter {
  * Directly specify the {@link IndexCommitter} and {@link 
IndexFailurePolicy}. Both are expected to be fully setup
  * before calling.
  * 
- * @param committer
  * @param policy
  * @param env
+ * @param name
  * @throws IOException
- * @throws ZooKeeperConnectionException
- * @throws MasterNotRunningException
  */
 public RecoveryIndexWriter(IndexFailurePolicy policy, 
RegionCoprocessorEnvironment env, String name)
-throws MasterNotRunningException, ZooKeeperConnectionException, 
IOException {
+throws IOException {
 super(new TrackingParallelWriterIndexCommitter(), policy, env, name);
 this.admin = new HBaseAdmin(env.getConfiguration());
 }
@@ -125,7 +121,7 @@ public class RecoveryIndexWriter extends IndexWriter {
 try {
 admin.close();
 } catch (IOException e) {
-// closing silently
+LOG.error("Closing the admin failed: ", e);
 }
 }
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index f717647..4561152 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -17,7 +17,6 @@
  */
 package org.apache.phoenix.mapreduce;
 
-import java.io.IOException;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
@@ -37,20 +36,17 @@ import org.apache.commons.cli.ParseException;
 import org.apache.co
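
The recurring shape of the PHOENIX-5184 fixes, visible in the targetHTable hunk above, is to guard each close() separately so one failing close cannot leak the resources closed after it. A hypothetical utility capturing that shape (QuietCloser is illustrative, not part of the patch):

import java.io.Closeable;
import java.io.IOException;

class QuietCloser {
    static void closeAll(Closeable... resources) {
        for (Closeable resource : resources) {
            if (resource == null) continue;
            try {
                resource.close();
            } catch (IOException e) {
                // log and keep going; the remaining resources still close
                System.err.println("Closing " + resource + " failed: " + e);
            }
        }
    }
}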

[phoenix] 22/34: PHOENIX-5025 Tool to clean up orphan views

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 0ec78d2a881dc82f240deb9e4a4b73f709cbf941
Author: Kadir 
AuthorDate: Mon Nov 12 22:24:10 2018 -0800

PHOENIX-5025 Tool to clean up orphan views
---
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 472 +++
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 879 +
 2 files changed, 1351 insertions(+)
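An orphan view in this context is a view whose metadata rows (or parent/child link rows) linger in SYSTEM.CATALOG after the base table or an ancestor view was dropped, leaving entries that can no longer resolve. For orientation, a minimal, illustrative JDBC query for listing view header rows, the kind of metadata the tool inspects (the connection URL is a placeholder and the query shape is an assumption, not the tool's exact scan):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ListViews {
        public static void main(String[] args) throws Exception {
            // 'v' is the TABLE_TYPE code Phoenix stores for views in SYSTEM.CATALOG
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery(
                         "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME FROM SYSTEM.CATALOG "
                                 + "WHERE TABLE_TYPE = 'v'")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1) + ":" + rs.getString(2)
                            + "." + rs.getString(3));
                }
            }
        }
    }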

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
new file mode 100644
index 000..f9a1785
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -0,0 +1,472 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.mapreduce.OrphanViewTool;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.AfterClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@RunWith(Parameterized.class)
+public class OrphanViewToolIT extends ParallelStatsDisabledIT {
+private static final Logger LOG = LoggerFactory.getLogger(OrphanViewToolIT.class);
+
+private final boolean isMultiTenant;
+private final boolean columnEncoded;
+
+private static final long fanout = 2;
+private static final long childCount = fanout;
+private static final long grandChildCount = fanout * fanout;
+private static final long grandGrandChildCount = fanout * fanout * fanout;
+
+private static final String filePath = "/tmp/";
+private static final String viewFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.VIEW];
+private static final String physicalLinkFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.PHYSICAL_TABLE_LINK];
+private static final String parentLinkFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.PARENT_TABLE_LINK];
+private static final String childLinkFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.CHILD_TABLE_LINK];
+
+protected static String SCHEMA1 = "SCHEMA1";
+protected static String SCHEMA2 = "SCHEMA2";
+protected static String SCHEMA3 = "SCHEMA3";
+protected static String SCHEMA4 = "SCHEMA4";
+
+private final String TENANT_SPECIFIC_URL = getUrl() + ';' + TENANT_ID_ATTRIB + "=tenant";
+
+private static final String createBaseTableFirstPartDDL = "CREATE TABLE IF NOT EXISTS %s";
+private static final String createBaseTableSecondPartDDL = "(%s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR " +
+" CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)) %s";
+private static final String deleteTableRows = "DELETE FROM " + SYSTEM_CATALOG_NAME +
+" 

[phoenix] 33/34: PHOENIX-5018 Index mutations created by UPSERT SELECT will have wrong timestamps

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit e5ba2e989fad4e3f1de6734a400502cb586d5e8a
Author: Kadir 
AuthorDate: Tue Jan 29 17:14:02 2019 -0800

PHOENIX-5018 Index mutations created by UPSERT SELECT will have wrong 
timestamps

Signed-off-by: Geoffrey Jacoby 
---
 .../phoenix/end2end/IndexBuildTimestampIT.java | 246 
 .../org/apache/phoenix/end2end/IndexToolIT.java|  37 ++
 .../phoenix/end2end/TableDDLPermissionsIT.java |   8 -
 .../org/apache/phoenix/rpc/PhoenixServerRpcIT.java |   6 -
 .../phoenix/compile/ServerBuildIndexCompiler.java  | 138 +++
 .../org/apache/phoenix/index/IndexMaintainer.java  | 433 ++---
 .../phoenix/mapreduce/PhoenixInputFormat.java  |   3 +-
 .../phoenix/mapreduce/PhoenixRecordReader.java |   4 +-
 .../PhoenixServerBuildIndexInputFormat.java| 111 ++
 .../apache/phoenix/mapreduce/index/IndexTool.java  | 241 +++-
 .../index/PhoenixServerBuildIndexMapper.java   |  75 
 .../mapreduce/util/PhoenixConfigurationUtil.java   |  25 ++
 .../mapreduce/util/PhoenixMapReduceUtil.java   |  27 ++
 .../org/apache/phoenix/schema/MetaDataClient.java  |  16 +-
 14 files changed, 1032 insertions(+), 338 deletions(-)
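As the subject says, index rows produced by an UPSERT SELECT based build could carry the build-time timestamp rather than the timestamp of the data row they were derived from, so the data table and its index disagreed about row history. One way to observe cell timestamps directly is a raw scan with the HBase 1.x client, much as the new IT does with Scan/ResultScanner; a minimal sketch (obtaining the Table handle is assumed):

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class TimestampDump {
        // Print each cell's row key and timestamp so index-table cells can be
        // compared against the data-table cells they were built from.
        static void dump(Table table) throws IOException {
            Scan scan = new Scan();
            scan.setRaw(true);      // include delete markers
            scan.setMaxVersions();  // keep every version, not just the latest
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result r : scanner) {
                    for (Cell c : r.rawCells()) {
                        System.out.println(Bytes.toStringBinary(CellUtil.cloneRow(c))
                                + " @ " + c.getTimestamp());
                    }
                }
            }
        }
    }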

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexBuildTimestampIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexBuildTimestampIT.java
new file mode 100644
index 000..7efba07
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexBuildTimestampIT.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixResultSet;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.EnvironmentEdge;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.QueryUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import com.google.common.collect.Lists;
+
+@RunWith(Parameterized.class)
+public class IndexBuildTimestampIT extends BaseUniqueNamesOwnClusterIT {
+private final boolean localIndex;
+private final boolean async;
+private final boolean view;
+private final String tableDDLOptions;
+
+public IndexBuildTimestampIT(boolean mutable, boolean localIndex,
+boolean async, boolean view) {
+this.localIndex = localIndex;
+this.async = async;
+this.view = view;
+StringBuilder optionBuilder = new StringBuilder();
+if (!mutable) {
+optionBuilder.append(" IMMUTABLE_ROWS=true ");
+}
+optionBuilder.append(" SPLIT ON(1,2)");
+this.tableDDLOptions = optionBuilder.toString();
+}
+
+@BeforeClass
+public static void setup() throws Exception {
+IndexToolIT.setup();
+}
+
+@Parameters(
+name = "mutable={0},localIndex={1},async={2},view={3}")
+public static Collection<Object[]> data() {
+List<Object[]> list = Lists.newArrayListWithExpectedSize(8);
+boolean[] Booleans = new boolean[]{false, true};
+for (boolean mutable : Booleans) {
+for (boolean localIndex : Booleans

[phoenix] 17/34: PHOENIX-5111: Null Pointer exception fix in index tool due to outputpath being null when direct option is supplied

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit fbe6b6cb9d80aca950df102f745568e84d2c4ba0
Author: Gokcen Iskender 
AuthorDate: Mon Jan 28 13:16:44 2019 -0800

PHOENIX-5111: Null Pointer exception fix in index tool due to outputpath 
being null when direct option is supplied

Signed-off-by: Geoffrey Jacoby 
---
 .../java/org/apache/phoenix/mapreduce/index/IndexTool.java | 14 --
 1 file changed, 8 insertions(+), 6 deletions(-)
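The cause: with the direct option, index updates are committed straight to HBase and no HFile staging directory is ever configured, so outputPath is legitimately null and the unconditional outputPath.getFileSystem(...) call threw. The fix below defers all filesystem work until a path is known to exist; restated outside diff context (names as in the patch):

    // Only touch the output filesystem when bulk-load output was requested;
    // with the direct option outputPath stays null and this block is skipped.
    if (outputPath != null) {
        fs = outputPath.getFileSystem(configuration);
        fs.delete(outputPath, true); // clear leftovers from a previous run
        FileOutputFormat.setOutputPath(job, outputPath);
    }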

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index 15d41ea..78e946d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -249,7 +249,7 @@ public class IndexTool extends Configured implements Tool {
 if (isPartialBuild) {
 return configureJobForPartialBuild(schemaName, dataTable);
 } else {
-return configureJobForAysncIndex(schemaName, indexTable, dataTable, useDirectApi, useSnapshot);
+return configureJobForAsyncIndex(schemaName, indexTable, dataTable, useDirectApi, useSnapshot);
 }
 }
 
@@ -362,7 +362,7 @@ public class IndexTool extends Configured implements Tool {
 
 }
 
-private Job configureJobForAysncIndex(String schemaName, String indexTable, String dataTable, boolean useDirectApi, boolean useSnapshot)
+private Job configureJobForAsyncIndex(String schemaName, String indexTable, String dataTable, boolean useDirectApi, boolean useSnapshot)
 throws Exception {
 final String qDataTable = SchemaUtil.getQualifiedTableName(schemaName, dataTable);
 final String qIndexTable;
@@ -408,14 +408,16 @@ public class IndexTool extends Configured implements Tool {
 final List<ColumnInfo> columnMetadataList =
 PhoenixRuntime.generateColumnInfo(connection, qIndexTable, indexColumns);
 ColumnInfoToStringEncoderDecoder.encode(configuration, columnMetadataList);
-fs = outputPath.getFileSystem(configuration);
-fs.delete(outputPath, true);
- 
+
 final String jobName = String.format(INDEX_JOB_NAME_TEMPLATE, schemaName, dataTable, indexTable);
 final Job job = Job.getInstance(configuration, jobName);
 job.setJarByClass(IndexTool.class);
 job.setMapOutputKeyClass(ImmutableBytesWritable.class);
-FileOutputFormat.setOutputPath(job, outputPath);
+if (outputPath != null) {
+fs = outputPath.getFileSystem(configuration);
+fs.delete(outputPath, true);
+FileOutputFormat.setOutputPath(job, outputPath);
+}
 
 if (!useSnapshot) {
 PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, qDataTable,



[phoenix] 31/34: PHOENIX-4822 Fixed Spelling.

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 2c2065f959d0fc9d9eb9b7e3033aa66b9738ebc3
Author: Jimmy Casey 
AuthorDate: Sun Jul 29 21:43:55 2018 +

PHOENIX-4822 Fixed Spelling.

Closes #318

Signed-off-by: Josh Elser 
---
 phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java
index dab03e7..a7a2180 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/LogWriter.java
@@ -21,7 +21,7 @@ import java.io.IOException;
 import java.sql.SQLException;
 
 /**
- * Used by the event handler to write RingBufferEvent, this is done in a seperate thread from the application configured
+ * Used by the event handler to write RingBufferEvent, this is done in a separate thread from the application configured
  * during disruptor
  */
 public interface LogWriter {



[phoenix] branch 4.14-HBase-1.3 updated (d165069 -> fc54cf3)

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from d165069  PHOENIX-5173: LIKE and ILIKE statements return empty result 
list for search without wildcard
 new 45db4c6  PHOENIX-5008: CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
 new 6e3af9e  PHOENIX-5008 (Addendum): CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
 new b9a98f5  PHOENIX-5005 Server-side delete / upsert-select potentially 
blocked after a split
 new 567d8bb  PHOENIX-4750 Resolve server customizers and provide them to 
Avatica
 new 7b8cf01  PHOENIX-4755 Provide an option to plugin custom avatica 
server config in PQS
 new b5ef5bc  PHOENIX-3991 ROW_TIMESTAMP on TIMESTAMP column type throws 
ArrayOutOfBound when upserting without providing a value.
 new 2561852  PHOENIX-4834 PhoenixMetricsLog interface methods should not 
depend on specific logger
 new 3a5fb00  PHOENIX-4835 LoggingPhoenixConnection should log metrics upon 
connection close
 new fa962f4  PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface 
for query level metrics logging
 new 7dc1fbd  PHOENIX-4854 Make LoggingPhoenixResultSet idempotent when 
logging metrics
 new 98bb986  PHOENIX-4864 Fix NullPointerException while Logging some DDL 
Statements
 new 4f7065a  PHOENIX-4870 LoggingPhoenixConnection should log metrics when 
AutoCommit is set to True.
 new a9608bf  PHOENIX-4989 Include disruptor jar in shaded dependency
 new 1f41ccc  PHOENIX-4781 Create artifact jar so that shaded jar replaces 
it properly
 new 61b3880  PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE 
timestamp check for all index
 new 5d48d30  PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 
4.14.1 with hbase-1.x branch in secure setup
 new fbe6b6c  PHOENIX-5111: Null Pointer exception fix in index tool due to 
outputpath being null when direct option is supplied
 new 336652d  PHOENIX-5094 increment pending disable count for index when 
rebuild starts
 new 293e75e  PHOENIX-4993 close cache connections when region server is 
going down
 new e41a96c  Add tenantId param to IndexTool
 new 7262f07  PHOENIX-5080 Index becomes Active during Partial Index 
Rebuilder if Index Failure happens
 new 0ec78d2  PHOENIX-5025 Tool to clean up orphan views
 new c9e850d  PHOENIX-5025 Tool to clean up orphan views (addendum)
 new 88a00e2  PHOENIX-5247 DROP TABLE and DROP VIEW commands fail to drop 
second or higher level child views
 new da6248a  PHOENIX-5137 check region close before commiting a batch for 
index rebuild
 new 272e2ad  PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.
 new 19d119a  PHOENIX-5172: Harden the PQS canary synth test tool with 
retry mechanism and more logging
 new bf79b6f  PHOENIX-5188 - IndexedKeyValue should populate KeyValue fields
 new 0b93d226 PHOENIX-5124 PropertyPolicyProvider should not evaluate 
default hbase config properties
 new 9a153c5  PHOENIX-4822 Ensure the provided timezone is used client-side 
(Jaanai Zhang)
 new 2c2065f  PHOENIX-4822 Fixed Spelling.
 new 66421fd  PHOENIX-5194 Thread Cache is not update for Index retries in 
for MutationState#send()#doMutation()
 new e5ba2e9  PHOENIX-5018 Index mutations created by UPSERT SELECT will 
have wrong timestamps
 new fc54cf3  PHOENIX-5184: HBase and Phoenix connection leaks in Indexing 
code path, OrphanViewTool and PhoenixConfigurationUtil

The 34 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 phoenix-client/pom.xml |   9 +-
 phoenix-core/pom.xml   |   7 +
 .../apache/phoenix/end2end/AppendOnlySchemaIT.java |   2 +-
 .../apache/phoenix/end2end/BasePermissionsIT.java  |   4 +-
 .../org/apache/phoenix/end2end/DateTimeIT.java |  77 ++
 .../phoenix/end2end/IndexBuildTimestampIT.java | 246 ++
 .../org/apache/phoenix/end2end/IndexToolIT.java| 150 +++-
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 479 +++
 .../org/apache/phoenix/end2end/RowTimestampIT.java |  26 +-
 .../SystemCatalogCreationOnConnectionIT.java   | 121 ++-
 .../phoenix/end2end/TableDDLPermissionsIT.java |   8 -
 .../it/java/org/apache/phoenix/end2end/ViewIT.java |   2 +-
 .../index/IndexRebuildIncrementDisableCountIT.java | 237 ++
 .../end2end/index/PartialIndexRebuilderIT.java |  66 +-
 .../phoenix/monitoring/BasePhoenixMetricsIT.java   | 128 +++
 .../monitoring/PhoenixLoggingMetricsIT.java| 290 +++
 .../phoenix/monitoring/P

[phoenix] 02/34: PHOENIX-5008 (Addendum): CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 6e3af9e674a7dc54b3526718c3dd23398d55d195
Author: Chinmay Kulkarni 
AuthorDate: Tue Nov 13 17:11:53 2018 -0800

PHOENIX-5008 (Addendum): CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
---
 .../org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index 0cd206e..59af533 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -500,7 +500,7 @@ public class SystemCatalogCreationOnConnectionIT {
  */
 private Set<String> getHBaseTables() throws IOException {
 Set<String> tables = new HashSet<>();
-for (TableName tn : testUtil.getAdmin().listTableNames()) {
+for (TableName tn : testUtil.getHBaseAdmin().listTableNames()) {
 tables.add(tn.getNameAsString());
 }
 return tables;



[phoenix] 12/34: PHOENIX-4870 LoggingPhoenixConnection should log metrics when AutoCommit is set to True.

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit f9ff033ccef034e2164391594cc5649538b22f35
Author: s.kadam 
AuthorDate: Wed Sep 5 17:00:03 2018 -0700

PHOENIX-4870 LoggingPhoenixConnection should log metrics when AutoCommit is 
set to True.
---
 .../monitoring/PhoenixLoggingMetricsIT.java| 61 --
 .../phoenix/jdbc/LoggingPhoenixConnection.java | 37 +++--
 .../jdbc/LoggingPhoenixPreparedStatement.java  | 25 +++--
 .../phoenix/jdbc/LoggingPhoenixStatement.java  | 28 --
 4 files changed, 125 insertions(+), 26 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 5d5524c..483d341 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -102,7 +102,8 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 public void testPhoenixMetricsLoggedOnCommit() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
-verifyQueryLevelMetricsLogging(query);
+ResultSet rs = upsertRows(query);
+verifyQueryLevelMetricsLogging(query, rs);
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -140,7 +141,9 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 public void testPhoenixMetricsLoggedOnClose() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
-verifyQueryLevelMetricsLogging(query);
+
+ResultSet rs = upsertRows(query);
+verifyQueryLevelMetricsLogging(query, rs);
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
@@ -164,13 +167,61 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 mutationReadMetricsMap.size() == 0);
 }
 
+/**
+ * This test is added to verify if metrics are being logged in case
+ * auto commit is set to true.
+ */
+@Test
+public void testPhoenixMetricsLoggedOnAutoCommitTrue() throws Exception {
+loggedConn.setAutoCommit(true);
+
+String query = "SELECT * FROM " + tableName1;
+ResultSet rs = upsertRows(query);
+verifyQueryLevelMetricsLogging(query, rs);
+
+// run UPSERT SELECT to verify mutation metrics are logged
+String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " 
+ tableName1;
+loggedConn.createStatement().executeUpdate(upsertSelect);
+
+assertTrue("Mutation write metrics are not logged for " + tableName2,
+mutationWriteMetricsMap.get(tableName2).size()  > 0);
+assertTrue("Mutation read metrics are not found for " + tableName1,
+mutationReadMetricsMap.get(tableName1).size() > 0);
+
+clearAllTestMetricMaps();
+
+loggedConn.createStatement().execute(query);
+assertTrue("Read metrics found for " + tableName1,
+mutationReadMetricsMap.size() == 0);
+loggedConn.createStatement().execute(upsertSelect);
+
+assertTrue("Mutation write metrics are not logged for " + tableName2
++ " in 
createStatement",mutationWriteMetricsMap.get(tableName2).size()  > 0);
+assertTrue("Mutation read metrics are not found for " + tableName1
++ " in 
createStatement",mutationReadMetricsMap.get(tableName1).size() > 0);
+
+clearAllTestMetricMaps();
+
+loggedConn.prepareStatement(query).executeQuery();
+assertTrue("Read metrics found for " + tableName1,
+mutationReadMetricsMap.size() == 0);
+
+loggedConn.prepareStatement(upsertSelect).executeUpdate();
+assertTrue("Mutation write metrics are not logged for " + tableName2
++ " in 
prepareStatement",mutationWriteMetricsMap.get(tableName2).size()  > 0);
+assertTrue("Mutation read metrics are not found for " + tableName1
++ " in 
prepareStatement",mutationReadMetricsMap.get(tableName1).size() > 0);
+
+
+}
+
 private ResultSet executeAndGetResultSet(String query) throws Exception {
 Statement stmt = loggedConn.createStatement();
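The new testPhoenixMetricsLoggedOnAutoCommitTrue covers the case where no explicit commit() ever happens: with autocommit on, every executeUpdate commits immediately, and that is the moment LoggingPhoenixConnection must flush its metrics maps. A minimal plain-JDBC sketch of the autocommit behavior being exercised (URL and table names are placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class AutoCommitUpsert {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.setAutoCommit(true);
                // commits immediately; a metrics-logging wrapper has to hook this
                // path, not just Connection.commit() or close()
                conn.createStatement().executeUpdate("UPSERT INTO T2 SELECT * FROM T1");
            }
        }
    }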
 

[phoenix] 18/34: PHOENIX-5094 increment pending disable count for index when rebuild starts

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit ca1b5888bafde6456e6a469399323923558e8a64
Author: Kiran Kumar Maturi 
AuthorDate: Fri Feb 1 19:37:11 2019 +0530

PHOENIX-5094 increment pending disable count for index when rebuild starts
---
 .../index/IndexRebuildIncrementDisableCountIT.java | 237 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   2 +-
 .../coprocessor/MetaDataRegionObserver.java|  23 +-
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  21 +-
 .../java/org/apache/phoenix/util/IndexUtil.java|  30 +++
 5 files changed, 292 insertions(+), 21 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
new file mode 100644
index 000..694f359
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
+import org.apache.phoenix.coprocessor.MetaDataRegionObserver.BuildIndexScheduleTask;
+import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClusterIT {
+private static final Log LOG = LogFactory.getLog(IndexRebuildIncrementDisableCountIT.class);
+private static long pendingDisableCount = 0;
+private static String ORG_PREFIX = "ORG";
+private static Result pendingDisableCountResult = null;
+private static String indexState = null;
+private static final Random RAND = new Random(5);
+private static final int WAIT_AFTER_DISABLED = 5000;
+private static final long REBUILD_PERIOD = 5;
+private static final long REBUILD_INTERVAL = 2000;
+private static RegionCoprocessorEnvironment indexRebuildTaskRegionEnvironment;
+private static String schemaName;
+private static String tableName;
+private static String fullTableName;
+private static String indexName;
+private static String fullIndexName;
+private static Connection conn;
+private static PhoenixConnection phoenixConn;
+
+@BeforeClass
+public static void doSetup() throws Exception {
+Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(10);
+serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
+Boolean.TRUE.toString());
+serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB,
+Long.toString(REBUILD_INTERVA

[phoenix] 05/34: PHOENIX-4755 Provide an option to plugin custom avatica server config in PQS

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 7b8cf010fa375359ee3c2f6187ff10af8eb34632
Author: Karan Mehta 
AuthorDate: Mon Aug 20 16:52:22 2018 -0700

PHOENIX-4755 Provide an option to plugin custom avatica server config in PQS
---
 .../org/apache/phoenix/query/QueryServices.java|   3 +-
 .../apache/phoenix/query/QueryServicesOptions.java |   1 +
 .../phoenix/end2end/ServerCustomizersIT.java   |   4 +-
 .../server/AvaticaServerConfigurationFactory.java  |  20 +++
 .../phoenix/queryserver/server/QueryServer.java| 167 ++---
 .../server/ServerCustomizersFactory.java   |   7 +-
 .../CustomAvaticaServerConfigurationTest.java  |  20 +++
 .../server/QueryServerConfigurationTest.java   |  26 +++-
 .../queryserver/server/ServerCustomizersTest.java  |  13 +-
 9 files changed, 194 insertions(+), 67 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index c7548df..9072d26 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -251,8 +251,9 @@ public interface QueryServices extends SQLCloseable {
 public static final String QUERY_SERVER_KERBEROS_ALLOWED_REALMS = "phoenix.queryserver.kerberos.allowed.realms";
 public static final String QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB = "phoenix.queryserver.spnego.auth.disabled";
 public static final String QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB = "phoenix.queryserver.withRemoteUserExtractor";
-public static final String QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "phoenix.queryserver.remoteUserExtractor.param";
 public static final String QUERY_SERVER_CUSTOMIZERS_ENABLED = "phoenix.queryserver.customizers.enabled";
+public static final String QUERY_SERVER_CUSTOM_AUTH_ENABLED = "phoenix.queryserver.custom.auth.enabled";
+public static final String QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "phoenix.queryserver.remoteUserExtractor.param";
 public static final String QUERY_SERVER_DISABLE_KERBEROS_LOGIN = "phoenix.queryserver.disable.kerberos.login";
 
 // metadata configs
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 7933ba0..02a3d4b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -299,6 +299,7 @@ public class QueryServicesOptions {
 public static final int DEFAULT_QUERY_SERVER_UGI_CACHE_CONCURRENCY = 10;
 public static final boolean DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED = false;
 public static final boolean DEFAULT_QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR = false;
+public static final boolean DEFAULT_QUERY_SERVER_CUSTOM_AUTH_ENABLED = false;
 public static final String DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "doAs";
 public static final boolean DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN = false;
 public static final boolean DEFAULT_QUERY_SERVER_CUSTOMIZERS_ENABLED = false;
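Each new key in QueryServices is paired with a default in QueryServicesOptions, and PQS reads the pair through the ordinary Hadoop Configuration API. A minimal sketch of how such a flag gets consulted (the configuration source is an assumption):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.query.QueryServices;
    import org.apache.phoenix.query.QueryServicesOptions;

    public class CustomAuthFlag {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml
            boolean customAuth = conf.getBoolean(
                    QueryServices.QUERY_SERVER_CUSTOM_AUTH_ENABLED,
                    QueryServicesOptions.DEFAULT_QUERY_SERVER_CUSTOM_AUTH_ENABLED);
            System.out.println("custom avatica auth enabled: " + customAuth);
        }
    }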
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
index d990adb..db08908 100644
--- a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
+++ b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
 import org.apache.calcite.avatica.server.ServerCustomizer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.query.QueryServices;
@@ -66,7 +67,8 @@ public class ServerCustomizersIT extends BaseHBaseManagedTimeIT {
 InstanceResolver.clearSingletons();
 InstanceResolver.getSingleton(ServerCustomizersFactory.class, new ServerCustomizersFactory() {
 @Override
-public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf) {
+public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf,
+  AvaticaServerConfiguration avaticaServerConfiguration) {
 return Collections.<ServerCustomizer<Server>>singletonList(new TestServerCustomizer());
 }
 });
diff --git 
a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerCo

[phoenix] 26/34: PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit dc481cea2882a5548da7f01600a608bdef5de48c
Author: s.kadam 
AuthorDate: Wed Dec 5 16:11:07 2018 -0800

PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.
---
 phoenix-core/pom.xml   |   7 +
 .../org/apache/phoenix/tool/CanaryTestResult.java  |  86 
 .../org/apache/phoenix/tool/PhoenixCanaryTool.java | 477 +
 .../resources/phoenix-canary-file-sink.properties  |  17 +
 .../apache/phoenix/tool/PhoenixCanaryToolTest.java | 140 ++
 5 files changed, 727 insertions(+)

diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -26,6 +26,7 @@
 
   <properties>
     <top.dir>${project.basedir}/..</top.dir>
+    <argparse4j.version>0.8.1</argparse4j.version>
   </properties>
 
   <dependencies>
@@ -238,6 +239,12 @@
       <artifactId>sqlline</artifactId>
     </dependency>
     <dependency>
+      <groupId>net.sourceforge.argparse4j</groupId>
+      <artifactId>argparse4j</artifactId>
+      <version>${argparse4j.version}</version>
+    </dependency>
+
+    <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java b/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java
new file mode 100644
index 000..b72439c
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/CanaryTestResult.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.tool;
+
+public class CanaryTestResult {
+
+private boolean isSuccessful;
+private long startTime;
+private long executionTime;
+private String message;
+private String testName;
+private String timestamp;
+private Object miscellaneous;
+
+public Object getMiscellaneous() {
+return miscellaneous;
+}
+
+public void setMiscellaneous(Object miscellaneous) {
+this.miscellaneous = miscellaneous;
+}
+
+public long getStartTime() {
+return startTime;
+}
+
+public void setStartTime(long startTime) {
+this.startTime = startTime;
+}
+
+public String getTimestamp() {
+return timestamp;
+}
+
+public void setTimestamp(String timestamp) {
+this.timestamp = timestamp;
+}
+
+public boolean isSuccessful() {
+return isSuccessful;
+}
+
+public void setSuccessful(boolean successful) {
+isSuccessful = successful;
+}
+
+public long getExecutionTime() {
+return executionTime;
+}
+
+public void setExecutionTime(long executionTime) {
+this.executionTime = executionTime;
+}
+
+public String getMessage() {
+return message;
+}
+
+public void setMessage(String message) {
+this.message = message;
+}
+
+public String getTestName() {
+return testName;
+}
+
+public void setTestName(String testName) {
+this.testName = testName;
+}
+
+}
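A short usage sketch of the result bean above, as a probe might fill it in (the probe outcome and values are illustrative only):

    import org.apache.phoenix.tool.CanaryTestResult;

    public class CanaryResultDemo {
        public static void main(String[] args) {
            CanaryTestResult result = new CanaryTestResult();
            result.setTestName("upsert");
            long start = System.currentTimeMillis();
            result.setStartTime(start);
            boolean ok = true; // outcome of the real probe would go here
            result.setSuccessful(ok);
            result.setExecutionTime(System.currentTimeMillis() - start);
            result.setMessage(ok ? "probe succeeded" : "probe failed");
            System.out.println(result.getTestName() + ": " + result.isSuccessful());
        }
    }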
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
new file mode 100644
index 000..405f54f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/tool/PhoenixCanaryTool.java
@@ -0,0 +1,477 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.tool;
+
+import co

[phoenix] 18/34: PHOENIX-5094 increment pending disable count for index when rebuild starts

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 336652d6f444df0ce7846cbee5ad1424f468bc82
Author: Kiran Kumar Maturi 
AuthorDate: Fri Feb 1 19:37:11 2019 +0530

PHOENIX-5094 increment pending disable count for index when rebuild starts
---
 .../index/IndexRebuildIncrementDisableCountIT.java | 237 +
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |   2 +-
 .../coprocessor/MetaDataRegionObserver.java|  23 +-
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  21 +-
 .../java/org/apache/phoenix/util/IndexUtil.java|  30 +++
 5 files changed, 292 insertions(+), 21 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
new file mode 100644
index 000..694f359
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
+import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
+import org.apache.phoenix.coprocessor.MetaDataRegionObserver.BuildIndexScheduleTask;
+import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClusterIT {
+private static final Log LOG = LogFactory.getLog(IndexRebuildIncrementDisableCountIT.class);
+private static long pendingDisableCount = 0;
+private static String ORG_PREFIX = "ORG";
+private static Result pendingDisableCountResult = null;
+private static String indexState = null;
+private static final Random RAND = new Random(5);
+private static final int WAIT_AFTER_DISABLED = 5000;
+private static final long REBUILD_PERIOD = 5;
+private static final long REBUILD_INTERVAL = 2000;
+private static RegionCoprocessorEnvironment indexRebuildTaskRegionEnvironment;
+private static String schemaName;
+private static String tableName;
+private static String fullTableName;
+private static String indexName;
+private static String fullIndexName;
+private static Connection conn;
+private static PhoenixConnection phoenixConn;
+
+@BeforeClass
+public static void doSetup() throws Exception {
+Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(10);
+serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
+Boolean.TRUE.toString());
+serverProps.put(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB,
+Long.toString(REBUILD_INTERVA

[phoenix] 21/34: PHOENIX-5080 Index becomes Active during Partial Index Rebuilder if Index Failure happens

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 7262f0713abc582a8841f062b55ad537fdec5e51
Author: Monani Mihir 
AuthorDate: Sat Feb 2 11:00:19 2019 +0530

PHOENIX-5080 Index becomes Active during Partial Index Rebuilder if Index 
Failure happens
---
 .../end2end/index/PartialIndexRebuilderIT.java |  66 +++-
 .../coprocessor/BaseScannerRegionObserver.java |   9 +-
 .../UngroupedAggregateRegionObserver.java  |  25 ++-
 .../org/apache/phoenix/execute/MutationState.java  |  14 +-
 .../org/apache/phoenix/hbase/index/Indexer.java|  10 +-
 .../hbase/index/builder/IndexBuildManager.java |   8 +
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  32 +++-
 .../apache/phoenix/index/PhoenixIndexMetaData.java |   3 +-
 .../java/org/apache/phoenix/query/BaseTest.java| 185 +
 9 files changed, 330 insertions(+), 22 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index 46443e3..cda282b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
 import org.apache.phoenix.coprocessor.MetaDataRegionObserver.BuildIndexScheduleTask;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
-import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.execute.CommitException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -86,6 +85,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
 private static final long REBUILD_PERIOD = 5;
 private static final long REBUILD_INTERVAL = 2000;
 private static RegionCoprocessorEnvironment indexRebuildTaskRegionEnvironment;
+private static Boolean runRebuildOnce = true;
 
 
 @BeforeClass
@@ -125,6 +125,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
 runIndexRebuilderAsync(interval, cancel, Collections.singletonList(table));
 }
 private static void runIndexRebuilderAsync(final int interval, final boolean[] cancel, final List<String> tables) {
+runRebuildOnce = true;
 Thread thread = new Thread(new Runnable() {
 @Override
 public void run() {
@@ -137,6 +138,8 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
 throw new RuntimeException(e);
 } catch (SQLException e) {
 LOG.error(e.getMessage(),e);
+} finally {
+runRebuildOnce = false;
 }
 }
 }
@@ -554,7 +557,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
 
 @Override
 public long currentTime() {
-return time;
+return time++;
 }
 }
 
@@ -1068,6 +1071,65 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
 }
 }
 
+@Test
+@Repeat(5)
+public void testIndexActiveIfRegionMovesWhileRebuilding() throws Throwable {
+final MyClock clock = new MyClock(1000);
+EnvironmentEdgeManager.injectEdge(clock);
+String schemaName = generateUniqueName();
+String tableName = generateUniqueName();
+String indexName = generateUniqueName();
+int nThreads = 5;
+int nRows = 50;
+int nIndexValues = 23;
+int batchSize = 200;
+final CountDownLatch doneSignal = new CountDownLatch(nThreads);
+boolean[] cancel = new boolean[1];
+
+final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+try {
+conn.createStatement().execute("CREATE TABLE " + fullTableName
++ "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 INTEGER, "
++ "CONSTRAINT pk PRIMARY KEY (k1,k2)) STORE_NULLS=true, 
VERSIONS=1");
+conn.createStatement().execute("CREATE INDEX " + indexName + " 
ON "
++ fullTableName + "(v1)");
+conn.commit();
+long disableTS = clock.currentTime();
+HTableInterface metaTable = 
conn.unwrap(PhoenixConnection.c
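Note the one-character change to MyClock earlier in this diff: returning time++ instead of time makes the injected clock advance on every read, so no two operations in the test can share a timestamp. A minimal sketch of the injectable-clock pattern the test relies on (injectEdge appears in the test itself; reset() and the start value are assumptions):

    import org.apache.phoenix.util.EnvironmentEdge;
    import org.apache.phoenix.util.EnvironmentEdgeManager;

    public class ClockInjectionDemo {
        // Strictly increasing timestamps: each read bumps the clock by one.
        private static class MyClock extends EnvironmentEdge {
            private long time;
            MyClock(long start) { this.time = start; }
            @Override
            public long currentTime() {
                return time++;
            }
        }

        public static void main(String[] args) {
            EnvironmentEdgeManager.injectEdge(new MyClock(1000));
            long t1 = EnvironmentEdgeManager.currentTimeMillis();
            long t2 = EnvironmentEdgeManager.currentTimeMillis();
            System.out.println(t1 + " < " + t2); // prints 1000 < 1001
            EnvironmentEdgeManager.reset(); // restore the wall clock
        }
    }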

[phoenix] branch 4.14-HBase-1.2 updated (1d16446 -> 9e10faa)

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a change to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from 1d16446  PHOENIX-5173: LIKE and ILIKE statements return empty result 
list for search without wildcard
 new f1cdc17  PHOENIX-5008: CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
 new cb36277  PHOENIX-5008 (Addendum): CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
 new 5c36e50  PHOENIX-5005 Server-side delete / upsert-select potentially 
blocked after a split
 new 28fc50e  PHOENIX-4750 Resolve server customizers and provide them to 
Avatica
 new 77dde07  PHOENIX-4755 Provide an option to plugin custom avatica 
server config in PQS
 new 93e62ee  PHOENIX-3991 ROW_TIMESTAMP on TIMESTAMP column type throws 
ArrayOutOfBound when upserting without providing a value.
 new c5120c5  PHOENIX-4834 PhoenixMetricsLog interface methods should not 
depend on specific logger
 new 663eff7  PHOENIX-4835 LoggingPhoenixConnection should log metrics upon 
connection close
 new b19  PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface 
for query level metrics logging
 new f8859c8  PHOENIX-4854 Make LoggingPhoenixResultSet idempotent when 
logging metrics
 new 8c5c1b6  PHOENIX-4864 Fix NullPointerException while Logging some DDL 
Statements
 new f9ff033  PHOENIX-4870 LoggingPhoenixConnection should log metrics when 
AutoCommit is set to True.
 new cb9c9a0  PHOENIX-4989 Include disruptor jar in shaded dependency
 new b3ecde7  PHOENIX-4781 Create artifact jar so that shaded jar replaces 
it properly
 new 512efd8  PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE 
timestamp check for all index
 new 172149d  PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 
4.14.1 with hbase-1.x branch in secure setup
 new 286769c  PHOENIX-5111: Null Pointer exception fix in index tool due to 
outputpath being null when direct option is supplied
 new ca1b588  PHOENIX-5094 increment pending disable count for index when 
rebuild starts
 new 495448e  PHOENIX-4993 close cache connections when region server is 
going down
 new e1daa42  Add tenantId param to IndexTool
 new 7772f69  PHOENIX-5080 Index becomes Active during Partial Index 
Rebuilder if Index Failure happens
 new e3bd603  PHOENIX-5025 Tool to clean up orphan views
 new f741006  PHOENIX-5025 Tool to clean up orphan views (addendum)
 new 9070d99  PHOENIX-5247 DROP TABLE and DROP VIEW commands fail to drop 
second or higher level child views
 new 1916b3d  PHOENIX-5137 check region close before commiting a batch for 
index rebuild
 new dc481ce  PHOENIX-4832: Add Canary Test Tool for Phoenix Query Server.
 new e3ac1e9  PHOENIX-5172: Harden the PQS canary synth test tool with 
retry mechanism and more logging
 new 294de68  PHOENIX-5188 - IndexedKeyValue should populate KeyValue fields
 new fd7dd41  PHOENIX-5124 PropertyPolicyProvider should not evaluate 
default hbase config properties
 new 7a6daf0  PHOENIX-4822 Ensure the provided timezone is used client-side 
(Jaanai Zhang)
 new 6a3e8e8  PHOENIX-4822 Fixed Spelling.
 new d5d7cd6  PHOENIX-5194 Thread Cache is not update for Index retries in 
for MutationState#send()#doMutation()
 new c8e2e34  PHOENIX-5018 Index mutations created by UPSERT SELECT will 
have wrong timestamps
 new 9e10faa  PHOENIX-5184: HBase and Phoenix connection leaks in Indexing 
code path, OrphanViewTool and PhoenixConfigurationUtil

The 34 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 phoenix-client/pom.xml |   9 +-
 phoenix-core/pom.xml   |   7 +
 .../apache/phoenix/end2end/AppendOnlySchemaIT.java |   2 +-
 .../apache/phoenix/end2end/BasePermissionsIT.java  |   4 +-
 .../org/apache/phoenix/end2end/DateTimeIT.java |  77 ++
 .../phoenix/end2end/IndexBuildTimestampIT.java | 246 ++
 .../org/apache/phoenix/end2end/IndexToolIT.java| 150 +++-
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 479 +++
 .../org/apache/phoenix/end2end/RowTimestampIT.java |  26 +-
 .../SystemCatalogCreationOnConnectionIT.java   | 120 ++-
 .../phoenix/end2end/TableDDLPermissionsIT.java |   8 -
 .../it/java/org/apache/phoenix/end2end/ViewIT.java |   2 +-
 .../index/IndexRebuildIncrementDisableCountIT.java | 237 ++
 .../end2end/index/PartialIndexRebuilderIT.java |  66 +-
 .../phoenix/monitoring/BasePhoenixMetricsIT.java   | 128 +++
 .../monitoring/PhoenixLoggingMetricsIT.java| 290 +++
 .../phoenix/monitoring/P

[phoenix] 20/34: Add tenantId param to IndexTool

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit e1daa4225a6cb8c9c6c7174741f0d1202984d697
Author: Gokcen Iskender 
AuthorDate: Mon Feb 11 12:58:53 2019 -0800

Add tenantId param to IndexTool

Signed-off-by: Geoffrey Jacoby 
---
 .../org/apache/phoenix/end2end/IndexToolIT.java| 113 -
 .../apache/phoenix/mapreduce/index/IndexTool.java  |  43 +---
 2 files changed, 139 insertions(+), 17 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
index a120aaa..9d6f881 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
@@ -40,15 +40,19 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.index.IndexTool;
+import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -73,14 +77,16 @@ public class IndexToolIT extends ParallelStatsEnabledIT {
 private final String tableDDLOptions;
 private final boolean mutable;
 private final boolean useSnapshot;
+private final boolean useTenantId;
 
 public IndexToolIT(boolean transactional, boolean mutable, boolean localIndex,
-boolean directApi, boolean useSnapshot) {
+boolean directApi, boolean useSnapshot, boolean useTenantId) {
 this.localIndex = localIndex;
 this.transactional = transactional;
 this.directApi = directApi;
 this.mutable = mutable;
 this.useSnapshot = useSnapshot;
+this.useTenantId = useTenantId;
 StringBuilder optionBuilder = new StringBuilder();
 if (!mutable) {
 optionBuilder.append(" IMMUTABLE_ROWS=true ");
@@ -117,12 +123,14 @@ public class IndexToolIT extends ParallelStatsEnabledIT {
 for (boolean localIndex : Booleans) {
 for (boolean directApi : Booleans) {
 for (boolean useSnapshot : Booleans) {
-list.add(new Boolean[] { transactional, mutable, localIndex, directApi, useSnapshot });
+list.add(new Boolean[] { transactional, mutable, localIndex, directApi, useSnapshot, false});
 }
 }
 }
 }
 }
+// Add the usetenantId
+list.add(new Boolean[] { false, false, false, true, false, true});
 return list;
 }
 
@@ -221,6 +229,90 @@ public class IndexToolIT extends ParallelStatsEnabledIT {
 }
 
 @Test
+public void testIndexToolWithTenantId() throws Exception {
+if (!useTenantId) { return;}
+String tenantId = generateUniqueName();
+String schemaName = generateUniqueName();
+String dataTableName = generateUniqueName();
+String viewTenantName = generateUniqueName();
+String indexNameGlobal = generateUniqueName();
+String indexNameTenant = generateUniqueName();
+String viewIndexTableName = "_IDX_" + dataTableName;
+
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection connGlobal = DriverManager.getConnection(getUrl(), props);
+props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+Connection connTenant = DriverManager.getConnection(getUrl(), props);
+String createTblStr = "CREATE TABLE %s (TENANT_ID VARCHAR(15) NOT NULL,ID INTEGER NOT NULL"
++ ", NAME VARCHAR, CONSTRAINT PK_1 PRIMARY KEY (TENANT_ID, ID)) MULTI_TENANT=true";
+String createViewStr = "CREATE VIEW %s AS SELECT * FROM %s";
+
+String upsertQueryStr = "UPSERT INTO %s (TENANT_ID, ID, NAME) VALUES('%s' , %d, '%s')";
+String createIndexStr = "CREATE INDEX %s ON %s (NAME) ";
+
+try {
+String tableStmtGlobal = String.format(createTblStr, dataTableName);
+connGl

[phoenix] 23/34: PHOENIX-5025 Tool to clean up orphan views (addendum)

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit c9e850de15f199d13c3fcb6d1f50a0499efdafa5
Author: Kadir 
AuthorDate: Wed Dec 12 17:53:38 2018 -0800

PHOENIX-5025 Tool to clean up orphan views (addendum)
---
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 25 +++---
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 89 +-
 2 files changed, 71 insertions(+), 43 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
index f9a1785..38d4afc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
@@ -27,9 +26,9 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
+import java.io.FileReader;
 import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Paths;
+import java.io.LineNumberReader;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -54,6 +53,7 @@ import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class OrphanViewToolIT extends ParallelStatsDisabledIT {
+private static final String SYSTEM_CHILD_LINK_NAME = SYSTEM_CATALOG_NAME;
 private static final Logger LOG = LoggerFactory.getLogger(OrphanViewToolIT.class);
 
 private final boolean isMultiTenant;
@@ -206,9 +206,13 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 }
 
 private void verifyLineCount(String fileName, long lineCount) throws IOException {
-if (Files.lines(Paths.get(fileName)).count() != lineCount)
-LOG.debug(Files.lines(Paths.get(fileName)).count() + " != " + lineCount);
-assertTrue(Files.lines(Paths.get(fileName)).count() == lineCount);
+LineNumberReader reader = new LineNumberReader(new FileReader(fileName));
+while (reader.readLine() != null) {
+}
+int count = reader.getLineNumber();
+if (count != lineCount)
+LOG.debug(count + " != " + lineCount);
+assertTrue(count == lineCount);
 }
 
 private void verifyCountQuery(Connection connection, String query, String schemaName, long count)
@@ -238,7 +242,6 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 }
 }
 
-
 private void verifyNoChildLink(Connection connection, String viewSchemaName) throws Exception {
 // Verify that there is no link in the system child link table
 verifyCountQuery(connection, countChildLinksQuery, viewSchemaName, 0);
@@ -264,6 +267,7 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 schemaName == null ? "IS NULL" : "= '" + schemaName + "'"));
 connection.commit();
 }
+
 @Test
 public void testDeleteBaseTableRows() throws Exception {
 String baseTableName = generateUniqueName();
@@ -438,7 +442,8 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 }
 }
 
-public static String[] getArgValues(boolean clean, boolean identify, boolean outputPath, boolean inputPath) {
+public static String[] getArgValues(boolean clean, boolean identify, boolean outputPath, boolean inputPath)
+throws InterruptedException {
 final List<String> args = Lists.newArrayList();
 if (outputPath) {
 args.add("-op");
@@ -454,8 +459,10 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 if (identify) {
 args.add("-i");
 }
+final long ageMs = 2000;
+Thread.sleep(ageMs);
 args.add("-a");
-args.add("0");
+args.add(Long.toString(ageMs));
 return args.toArray(new String[0]);
 }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index a8a30b6..2e0dd0d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -
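A note on the counting idiom this addendum introduces: the earlier Files.lines() version opened a fresh stream on each call and never closed it, whereas LineNumberReader accumulates the count as a side effect of reading. A minimal standalone sketch of the same idiom, with the reader closed via try-with-resources (class and method names here are illustrative, not part of the patch):

    import java.io.FileReader;
    import java.io.IOException;
    import java.io.LineNumberReader;

    class LineCountSketch {
        // Read every line; getLineNumber() then holds the total count.
        static int countLines(String fileName) throws IOException {
            try (LineNumberReader reader = new LineNumberReader(new FileReader(fileName))) {
                while (reader.readLine() != null) {
                    // intentionally empty: reading advances the line counter
                }
                return reader.getLineNumber();
            }
        }
    }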

[phoenix] 01/34: PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 45db4c6d1d0befb7c9b40c946d72f2e9dcd55bbb
Author: Chinmay Kulkarni 
AuthorDate: Fri Nov 9 19:22:57 2018 -0800

PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to 
client in case of an UpgradeRequiredException
---
 .../SystemCatalogCreationOnConnectionIT.java   | 123 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |   4 +-
 2 files changed, 101 insertions(+), 26 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index 689eb20..0cd206e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -17,6 +17,24 @@
  */
 package org.apache.phoenix.end2end;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.apache.phoenix.query.BaseTest.generateUniqueName;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -26,6 +44,7 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.UpgradeRequiredException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.query.*;
@@ -36,14 +55,6 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.*;
-import java.util.concurrent.TimeoutException;
-
-import static org.junit.Assert.*;
-
 @Category(NeedsOwnMiniClusterTest.class)
 public class SystemCatalogCreationOnConnectionIT {
 private HBaseTestingUtility testUtil = null;
@@ -57,6 +68,12 @@ public class SystemCatalogCreationOnConnectionIT {
 private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG";
 private static final String EXECUTE_UPGRADE_COMMAND = "EXECUTE UPGRADE";
 private static final String MODIFIED_MAX_VERSIONS ="5";
+private static final String CREATE_TABLE_STMT = "CREATE TABLE %s"
++ " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2))";
+private static final String SELECT_STMT = "SELECT * FROM %s";
+private static final String DELETE_STMT = "DELETE FROM %s";
+private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMY_IDX ON %s (K1) INCLUDE (K2)";
+private static final String UPSERT_STMT = "UPSERT INTO %s VALUES ('A', 'B')";
 
 private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
   "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
@@ -155,12 +172,8 @@ public class SystemCatalogCreationOnConnectionIT {
 UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradePropSet);
 SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
   new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
-try {
-driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
-fail("Client should not be able to create SYSTEM.CATALOG since we set the doNotUpgrade property");
-} catch (Exception e) {
-assertTrue(e instanceof UpgradeRequiredException);
-}
+
+driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
 hbaseTables = getHBaseTables();
 assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
 assertTrue(hbaseTables.size() == 0);
@@ -416,6 +429,70 @@ public class SystemCatalogCreationOnConnectionIT {
 assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG));
 }
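As a rough sketch of what this change means for a client that opts out of the automatic upgrade (the property helper and the EXECUTE UPGRADE statement both appear in the test above; the JDBC URL is a placeholder and this is not the test's exact flow):

    // With doNotUpgradeOnFirstConnection set, obtaining the connection
    // services no longer throws UpgradeRequiredException; the metadata
    // upgrade is instead triggered explicitly when the client is ready.
    Properties props = new Properties();
    UpgradeUtil.doNotUpgradeOnFirstConnection(props);
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
         Statement stmt = conn.createStatement()) {
        stmt.execute("EXECUTE UPGRADE");
    }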

[phoenix] 28/34: PHOENIX-5188 - IndexedKeyValue should populate KeyValue fields

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 294de68484d482ef945289ebac9fcb59d9b39a0a
Author: Geoffrey Jacoby 
AuthorDate: Tue Mar 12 11:17:50 2019 -0700

PHOENIX-5188 - IndexedKeyValue should populate KeyValue fields
---
 .../phoenix/hbase/index/wal/IndexedKeyValue.java   | 25 
 .../regionserver/wal/IndexedKeyValueTest.java  | 67 ++
 2 files changed, 92 insertions(+)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
index b04cf0a..f01dc06 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
@@ -53,6 +53,7 @@ public class IndexedKeyValue extends KeyValue {
 public IndexedKeyValue() {}
 
 public IndexedKeyValue(byte[] bs, Mutation mutation) {
+super(mutation.getRow(), 0, mutation.getRow().length);
 this.indexTableName = new ImmutableBytesPtr(bs);
 this.mutation = mutation;
 this.hashCode = calcHashCode(indexTableName, mutation);
@@ -117,6 +118,24 @@ public class IndexedKeyValue extends KeyValue {
 }
 
 @Override
+public int getRowOffset() {
+return this.offset;
+}
+
+@Override
+public short getRowLength() {
+return (short) this.length;
+}
+
+@Override
+public int getKeyLength(){
+//normally the key is row key + other key fields such as timestamp,
+// but those aren't defined here because a Mutation can contain multiple,
+// so we just return the length of the row key
+return this.length;
+}
+
+@Override
 public String toString() {
 return "IndexWrite:\n\ttable: " + indexTableName + "\n\tmutation:" + mutation;
 }
@@ -179,6 +198,12 @@ public class IndexedKeyValue extends KeyValue {
 MutationProto mProto = MutationProto.parseFrom(mutationData);
 this.mutation = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto);
 this.hashCode = calcHashCode(indexTableName, mutation);
+if (mutation != null){
+bytes = mutation.getRow();
+offset = 0;
+length = bytes.length;
+}
+
 }
 
 public boolean getBatchFinished() {
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java
new file mode 100644
index 000..7f34fcd
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
+import org.apache.phoenix.hbase.index.wal.KeyValueCodec;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+
+
+public class IndexedKeyValueTest {
+
+@Test
+public void testIndexedKeyValuePopulatesKVFields() throws Exception {
+byte[] row = Bytes.toBytes("foo");
+byte[] tableNameBytes = Bytes.toBytes("MyTableName");
+Mutation mutation = new Put(row);
+IndexedKeyValue indexedKeyValue = new IndexedKeyValue(tableNameBytes, mutation);
+testIndexedKeyValueHelper(indexedKeyValue, row, tableNameBytes, mutation);
+
+//now serialize the IndexedKeyValue and make sure the deserialized copy also
+//has all the right fields
+ByteArrayOutputStream baos = new ByteArrayOutputStream(
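The digest cuts the test off here. Purely as an illustration of the assertions such a helper would plausibly make (a hypothetical reconstruction, not the committed code):

    private void testIndexedKeyValueHelper(IndexedKeyValue ikv, byte[] row,
            byte[] tableNameBytes, Mutation mutation) {
        // PHOENIX-5188 populates the KeyValue row fields from the Mutation,
        // so the row must be readable through the standard Cell accessors.
        Assert.assertEquals(0, ikv.getRowOffset());
        Assert.assertEquals(row.length, ikv.getRowLength());
        Assert.assertArrayEquals(row,
                Bytes.copy(ikv.getRowArray(), ikv.getRowOffset(), ikv.getRowLength()));
    }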

[phoenix] 09/34: PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface for query level metrics logging

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit b1940cf4080675ad6ca24227a4ce0cc0b1de
Author: Karan Mehta 
AuthorDate: Fri Aug 17 13:02:08 2018 -0700

PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface for query 
level metrics logging
---
 .../monitoring/PhoenixLoggingMetricsIT.java| 15 +++---
 .../phoenix/jdbc/LoggingPhoenixConnection.java | 16 +++
 .../jdbc/LoggingPhoenixPreparedStatement.java  | 13 +++-
 .../phoenix/jdbc/LoggingPhoenixResultSet.java  | 10 +
 .../phoenix/jdbc/LoggingPhoenixStatement.java  | 24 +-
 .../org/apache/phoenix/jdbc/PhoenixMetricsLog.java |  6 +++---
 6 files changed, 56 insertions(+), 28 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 02640e7..97b2c5d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -43,6 +43,7 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 private String tableName1;
 private String tableName2;
 private LoggingPhoenixConnection loggedConn;
+private String loggedSql;
 
 @Before
 public void beforeTest() throws Exception {
@@ -75,7 +76,10 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 rs.close();
 assertTrue("Read metrics for not found for " + tableName1,
 requestReadMetricsMap.get(tableName1).size() > 0);
+assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
+
 assertTrue("Overall read metrics for not found ", overAllQueryMetricsMap.size() > 0);
+assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
@@ -120,7 +124,10 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 rs.close();
 assertTrue("Read metrics for not found for " + tableName1,
 requestReadMetricsMap.get(tableName1).size() > 0);
+assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
+
 assertTrue("Overall read metrics for not found ", overAllQueryMetricsMap.size() > 0);
+assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
@@ -155,18 +162,20 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 return new LoggingPhoenixConnection(conn, new PhoenixMetricsLog() {
 @Override
 public void logOverAllReadRequestMetrics(
-Map overAllQueryMetrics) {
+Map overAllQueryMetrics, String sql) {
 overAllQueryMetricsMap.putAll(overAllQueryMetrics);
+loggedSql = sql;
 }
 
 @Override
 public void logRequestReadMetrics(
-Map> requestReadMetrics) {
+Map> requestReadMetrics, String sql) {
 requestReadMetricsMap.putAll(requestReadMetrics);
+loggedSql = sql;
 }
 
 @Override
-public void logWriteMetricsfoForMutations(
+public void logWriteMetricsfoForMutationsSinceLastReset(
 Map> mutationWriteMetrics) {
 mutationWriteMetricsMap.putAll(mutationWriteMetrics);
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
index 9a2e00f..37917e2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
@@ -61,7 +61,7 @@ public class LoggingPhoenixConnection extends DelegateConnection {
 @Override
 public PreparedStatement prepareStatement(String sql) throws SQLException {
 return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql),
-phoenixMetricsLog);
+phoenixMetricsLog, sql);
 }
 
 @Override
@@ -69,40 +69,40 @@ public class LoggingPhoenixConnection extends DelegateConnection {

[phoenix] 05/34: PHOENIX-4755 Provide an option to plugin custom avatica server config in PQS

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 77dde071113f3160bba6093fba928514efa4ff92
Author: Karan Mehta 
AuthorDate: Mon Aug 20 16:52:22 2018 -0700

PHOENIX-4755 Provide an option to plugin custom avatica server config in PQS
---
 .../org/apache/phoenix/query/QueryServices.java|   3 +-
 .../apache/phoenix/query/QueryServicesOptions.java |   1 +
 .../phoenix/end2end/ServerCustomizersIT.java   |   4 +-
 .../server/AvaticaServerConfigurationFactory.java  |  20 +++
 .../phoenix/queryserver/server/QueryServer.java| 167 ++---
 .../server/ServerCustomizersFactory.java   |   7 +-
 .../CustomAvaticaServerConfigurationTest.java  |  20 +++
 .../server/QueryServerConfigurationTest.java   |  26 +++-
 .../queryserver/server/ServerCustomizersTest.java  |  13 +-
 9 files changed, 194 insertions(+), 67 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index c7548df..9072d26 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -251,8 +251,9 @@ public interface QueryServices extends SQLCloseable {
 public static final String QUERY_SERVER_KERBEROS_ALLOWED_REALMS = "phoenix.queryserver.kerberos.allowed.realms";
 public static final String QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB = "phoenix.queryserver.spnego.auth.disabled";
 public static final String QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB = "phoenix.queryserver.withRemoteUserExtractor";
-public static final String QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "phoenix.queryserver.remoteUserExtractor.param";
 public static final String QUERY_SERVER_CUSTOMIZERS_ENABLED = "phoenix.queryserver.customizers.enabled";
+public static final String QUERY_SERVER_CUSTOM_AUTH_ENABLED = "phoenix.queryserver.custom.auth.enabled";
+public static final String QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "phoenix.queryserver.remoteUserExtractor.param";
 public static final String QUERY_SERVER_DISABLE_KERBEROS_LOGIN = "phoenix.queryserver.disable.kerberos.login";
 
 // metadata configs
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 7933ba0..02a3d4b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -299,6 +299,7 @@ public class QueryServicesOptions {
 public static final int DEFAULT_QUERY_SERVER_UGI_CACHE_CONCURRENCY = 10;
 public static final boolean DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED = false;
 public static final boolean DEFAULT_QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR = false;
+public static final boolean DEFAULT_QUERY_SERVER_CUSTOM_AUTH_ENABLED = false;
 public static final String DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "doAs";
 public static final boolean DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN = false;
 public static final boolean DEFAULT_QUERY_SERVER_CUSTOMIZERS_ENABLED = false;
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
index d990adb..db08908 100644
--- a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
+++ b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/ServerCustomizersIT.java
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.calcite.avatica.server.AvaticaServerConfiguration;
 import org.apache.calcite.avatica.server.ServerCustomizer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.query.QueryServices;
@@ -66,7 +67,8 @@ public class ServerCustomizersIT extends BaseHBaseManagedTimeIT {
 InstanceResolver.clearSingletons();
 InstanceResolver.getSingleton(ServerCustomizersFactory.class, new ServerCustomizersFactory() {
 @Override
-public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf) {
+public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf,
+  AvaticaServerConfiguration avaticaServerConfiguration) {
 return Collections.<ServerCustomizer<Server>>singletonList(new TestServerCustomizer());
 }
 });
diff --git 
a/phoenix-queryserver/src/main/java/org/apache/phoenix/queryserver/server/AvaticaServerCo

[phoenix] 22/34: PHOENIX-5025 Tool to clean up orphan views

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit e3bd6032207360652f4586eddc4f0f630abc2c76
Author: Kadir 
AuthorDate: Mon Nov 12 22:24:10 2018 -0800

PHOENIX-5025 Tool to clean up orphan views
---
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 472 +++
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 879 +
 2 files changed, 1351 insertions(+)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
new file mode 100644
index 000..f9a1785
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -0,0 +1,472 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.mapreduce.OrphanViewTool;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.AfterClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@RunWith(Parameterized.class)
+public class OrphanViewToolIT extends ParallelStatsDisabledIT {
+private static final Logger LOG = LoggerFactory.getLogger(OrphanViewToolIT.class);
+
+private final boolean isMultiTenant;
+private final boolean columnEncoded;
+
+private static final long fanout = 2;
+private static final long childCount = fanout;
+private static final long grandChildCount = fanout * fanout;
+private static final long grandGrandChildCount = fanout * fanout * fanout;
+
+private static final String filePath = "/tmp/";
+private static final String viewFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.VIEW];
+private static final String physicalLinkFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.PHYSICAL_TABLE_LINK];
+private static final String parentLinkFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.PARENT_TABLE_LINK];
+private static final String childLinkFileName = "/tmp/" + OrphanViewTool.fileName[OrphanViewTool.CHILD_TABLE_LINK];
+
+protected static String SCHEMA1 = "SCHEMA1";
+protected static String SCHEMA2 = "SCHEMA2";
+protected static String SCHEMA3 = "SCHEMA3";
+protected static String SCHEMA4 = "SCHEMA4";
+
+private final String TENANT_SPECIFIC_URL = getUrl() + ';' + TENANT_ID_ATTRIB + "=tenant";
+
+private static final String createBaseTableFirstPartDDL = "CREATE TABLE IF NOT EXISTS %s";
+private static final String createBaseTableSecondPartDDL = "(%s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR " +
+" CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)) %s";
+private static final String deleteTableRows = "DELETE FROM " + SYSTEM_CATALOG_NAME +
+" 
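For readers who want to drive the tool directly, a hypothetical invocation using the flags that getArgValues assembles earlier in this digest (-op output path, -i identify, -a max age in ms); it assumes OrphanViewTool follows the usual Phoenix MapReduce Tool pattern, so check the class itself for the authoritative option list:

    // Hypothetical driver snippet; assumes Configured + Tool wiring.
    Configuration conf = HBaseConfiguration.create();
    OrphanViewTool tool = new OrphanViewTool();
    tool.setConf(conf);
    // identify (but do not clean) orphan views older than 2 seconds,
    // writing the findings under /tmp/
    int rc = tool.run(new String[] { "-op", "/tmp/", "-i", "-a", "2000" });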

[phoenix] 25/34: PHOENIX-5137 check region close before committing a batch for index rebuild

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 1916b3d76cdc9a190680476828442da1dd9b91cc
Author: Kiran Kumar Maturi 
AuthorDate: Fri Feb 22 09:45:13 2019 +0530

PHOENIX-5137 check region close before committing a batch for index rebuild
---
 .../UngroupedAggregateRegionObserver.java  | 30 +-
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 703ff97..2eb15a1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -260,7 +260,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
   return;
   }
 
-Mutation[] mutationArray = new Mutation[mutations.size()];
+   Mutation[] mutationArray = new Mutation[mutations.size()];
   // When memstore size reaches blockingMemstoreSize we are waiting 3 seconds for the
   // flush happen which decrease the memstore size and then writes allowed on the region.
   for (int i = 0; blockingMemstoreSize > 0 && region.getMemstoreSize() > blockingMemstoreSize && i < 30; i++) {
@@ -371,6 +371,17 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 super.clear();
 }
 }
+
+   private long getBlockingMemstoreSize(Region region, Configuration conf) {
+   long flushSize = region.getTableDesc().getMemStoreFlushSize();
+
+   if (flushSize <= 0) {
+   flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
+   HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
+   }
+   return flushSize * (conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
+   HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1);
+   }
 
 @Override
 protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException, SQLException {
@@ -487,12 +498,6 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 MutationList mutations = new MutationList();
 boolean needToWrite = false;
 Configuration conf = env.getConfiguration();
-long flushSize = region.getTableDesc().getMemStoreFlushSize();
-
-if (flushSize <= 0) {
-flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
-HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
-}
 
 /**
  * Slow down the writes if the memstore size more than
@@ -500,9 +505,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
  * bytes. This avoids flush storm to hdfs for cases like index building where reads and
  * write happen to all the table regions in the server.
  */
-final long blockingMemStoreSize = flushSize * (
-conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER,
-HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER)-1) ;
+final long blockingMemStoreSize = getBlockingMemstoreSize(region, conf) ;
 
 boolean buildLocalIndex = indexMaintainers != null && dataColumns==null && !localIndexScan;
 if(buildLocalIndex) {
@@ -1043,6 +1046,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 int maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 long maxBatchSizeBytes = config.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
 QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
+final long blockingMemstoreSize = getBlockingMemstoreSize(region, config);
 MutationList mutations = new MutationList(maxBatchSize);
 region.startRegionOperation();
 byte[] uuidValue = ServerCacheClient.generateId();
@@ -1084,7 +1088,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 }
 }
 if (ServerUtil.readyToCommit(mutations.size(), mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
-commitBatchWithRetries(region, mutations, -1);
+checkForRegionClosingOrSplitting();
+commitBatchWithRetries(region, mutations, blockingMemstoreSize);
 uuidValue = ServerCacheClient.generateId(
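To make the throttling threshold concrete, a worked example of the getBlockingMemstoreSize() computation above using common HBase 1.x defaults (values are illustrative, not taken from a live cluster):

    long flushSize = 128L * 1024 * 1024;  // hbase.hregion.memstore.flush.size, 128 MB
    long multiplier = 4L;                 // hbase.hregion.memstore.block.multiplier
    long blockingMemstoreSize = flushSize * (multiplier - 1);  // 384 MB
    // writes are slowed once the region's memstore exceeds this threshold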

[phoenix] 02/34: PHOENIX-5008 (Addendum): CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit cb3627773d6cea538d525eb456b7e39bffa85825
Author: Chinmay Kulkarni 
AuthorDate: Tue Nov 13 17:11:53 2018 -0800

PHOENIX-5008 (Addendum): CQSI.init should not bubble up 
RetriableUpgradeException to client in case of an UpgradeRequiredException
---
 .../org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index 8fe3b69..0edf8e0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -501,7 +501,7 @@ public class SystemCatalogCreationOnConnectionIT {
  */
 private Set<String> getHBaseTables() throws IOException {
 Set<String> tables = new HashSet<>();
-for (TableName tn : testUtil.getAdmin().listTableNames()) {
+for (TableName tn : testUtil.getHBaseAdmin().listTableNames()) {
 tables.add(tn.getNameAsString());
 }
 return tables;



[phoenix] 30/34: PHOENIX-4822 Ensure the provided timezone is used client-side (Jaanai Zhang)

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 7a6daf0fcb88214f1273aa3e160f13db84036ffe
Author: Josh Elser 
AuthorDate: Tue Jul 31 15:53:11 2018 -0400

PHOENIX-4822 Ensure the provided timezone is used client-side (Jaanai Zhang)
---
 .../org/apache/phoenix/end2end/DateTimeIT.java | 77 ++
 .../apache/phoenix/compile/StatementContext.java   | 11 ++--
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  8 ++-
 .../java/org/apache/phoenix/util/DateUtil.java | 22 ---
 4 files changed, 101 insertions(+), 17 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
index c976114..cc7c7a7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -54,12 +54,19 @@ import java.text.Format;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
 import java.util.Properties;
+import java.util.TimeZone;
 
+import org.apache.commons.lang.time.FastDateFormat;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PTime;
 import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.DateUtil;
@@ -1880,4 +1887,74 @@ public class DateTimeIT extends ParallelStatsDisabledIT {
 conn.close();
 }
 }
+
+@Test
+public void testDateFormatTimeZone() throws Exception {
+String[] timeZoneIDs = {DateUtil.DEFAULT_TIME_ZONE_ID, "Asia/Yerevan", "Australia/Adelaide", "Asia/Tokyo"};
+for (String timeZoneID : timeZoneIDs) {
+testDateFormatTimeZone(timeZoneID);
+}
+}
+
+public void testDateFormatTimeZone(String timeZoneId) throws Exception {
+Properties props = new Properties();
+props.setProperty("phoenix.query.dateFormatTimeZone", timeZoneId);
+Connection conn1 = DriverManager.getConnection(getUrl(), props);
+
+String tableName = generateUniqueName();
+String ddl = "CREATE TABLE IF NOT EXISTS " + tableName +
+" (k1 INTEGER PRIMARY KEY," +
+" v_date DATE," +
+" v_time TIME," +
+" v_timestamp TIMESTAMP)";
+try {
+conn1.createStatement().execute(ddl);
+
+PhoenixConnection pConn = conn1.unwrap(PhoenixConnection.class);
+verifyTimeZoneIDWithConn(pConn, PDate.INSTANCE, timeZoneId);
+verifyTimeZoneIDWithConn(pConn, PTime.INSTANCE, timeZoneId);
+verifyTimeZoneIDWithConn(pConn, PTimestamp.INSTANCE, timeZoneId);
+
+Calendar cal = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId));
+cal.setTime(date);
+String dateStr = DateUtil.getDateFormatter(DateUtil.DEFAULT_MS_DATE_FORMAT).format(date);
+
+String dml = "UPSERT INTO " + tableName + " VALUES (" +
+"1," +
+"'" + dateStr + "'," +
+"'" + dateStr + "'," +
+"'" + dateStr + "'" +
+")";
+conn1.createStatement().execute(dml);
+conn1.commit();
+
+PhoenixStatement stmt = conn1.createStatement().unwrap(PhoenixStatement.class);
+ResultSet rs = stmt.executeQuery("SELECT v_date, v_time, v_timestamp FROM " + tableName);
+
+assertTrue(rs.next());
+assertEquals(rs.getDate(1).toString(), new Date(cal.getTimeInMillis()).toString());
+assertEquals(rs.getTime(2).toString(), new Time(cal.getTimeInMillis()).toString());
+assertEquals(rs.getTimestamp(3).getTime(), cal.getTimeInMillis());
+assertFalse(rs.next());
+
+StatementContext stmtContext = stmt.getQueryPlan().getContext();
+verifyTimeZoneIDWithFormatter(stmtContext.getDateFormatter(), timeZoneId);
+verifyTimeZoneIDWithFormatter(stmtContext.getTimeFormatter(), timeZoneId);
+verifyTimeZoneIDWithFormatter(stmtContext.getTimestampFormatter(), timeZoneId);
+
+stmt.close();
+} finally {
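The digest truncates this test just below. A minimal sketch of the client-side behavior being verified (URL, table, and column names are placeholders):

    Properties props = new Properties();
    props.setProperty("phoenix.query.dateFormatTimeZone", "Asia/Tokyo");
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
         ResultSet rs = conn.createStatement().executeQuery("SELECT v_date FROM T")) {
        while (rs.next()) {
            // date/time values are parsed and formatted in the configured zone
            System.out.println(rs.getDate(1));
        }
    }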
+  

[phoenix] 24/34: PHOENIX-5247 DROP TABLE and DROP VIEW commands fail to drop second or higher level child views

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 88a00e21ee700b8a4d011068a9b65461aa0c123c
Author: Kadir 
AuthorDate: Wed Apr 17 17:27:16 2019 -0700

PHOENIX-5247 DROP TABLE and DROP VIEW commands fail to drop second or 
higher level child views
---
 .../src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java | 4 ++--
 phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java   | 2 +-
 .../java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
index d33d538..6d2dfb9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BasePermissionsIT.java
@@ -422,7 +422,7 @@ public class BasePermissionsIT extends BaseTest {
 @Override
 public Object run() throws Exception {
 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
-assertFalse(stmt.execute("DROP TABLE IF EXISTS " + tableName));
+assertFalse(stmt.execute(String.format("DROP TABLE IF EXISTS %s CASCADE", tableName)));
 }
 return null;
 }
@@ -647,7 +647,7 @@ public class BasePermissionsIT extends BaseTest {
 @Override
 public Object run() throws Exception {
 try (Connection conn = getConnection(); Statement stmt = conn.createStatement();) {
-assertFalse(stmt.execute("DROP VIEW " + viewName));
+assertFalse(stmt.execute(String.format("DROP VIEW %s CASCADE", viewName)));
 }
 return null;
 }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
index 34292ba..a6e066b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewIT.java
@@ -428,7 +428,7 @@ public class ViewIT extends BaseViewIT {
 ddl = "CREATE LOCAL INDEX idx on " + fullViewName1 + "(v2)";
 conn.createStatement().execute(ddl);
 String fullViewName2 = SchemaUtil.getTableName(viewSchemaName, "V_" + generateUniqueName());
-ddl = "CREATE VIEW " + fullViewName2 + "(v2 VARCHAR) AS SELECT * FROM " + fullTableName + " WHERE k > 10";
+ddl = "CREATE VIEW " + fullViewName2 + "(v3 VARCHAR) AS SELECT * FROM " + fullViewName1 + " WHERE k > 10";
 conn.createStatement().execute(ddl);
 
 validateCannotDropTableWithChildViewsWithoutCascade(conn, fullTableName);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index a87325e..f810dd7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -2169,7 +2169,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 EnvironmentEdgeManager.currentTimeMillis(), null);
 }
 
-if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM) {
+if (tableType == PTableType.TABLE || tableType == PTableType.SYSTEM || tableType == PTableType.VIEW) {
 // Handle any child views that exist
 TableViewFinder tableViewFinderResult = findChildViews(region, tenantId, table, clientVersion, !isCascade);
 if (tableViewFinderResult.hasViews()) {
@@ -2191,7 +2191,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
 acquireLock(region, viewKey, locks);
 MetaDataMutationResult result = doDropTable(viewKey, viewTenantId, viewSchemaName,
 viewName, null, PTableType.VIEW, rowsToDelete, invalidateList, locks,
-tableNamesToDelete, sharedTablesToDelete, false, clientVersion);
+tableNamesToDelete, sharedTablesToDelete, true, clientVersion);
 if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { return result; }
 }
 }
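A compact JDBC sketch of the hierarchy this commit fixes, mirroring the updated ViewIT test above (names and URL are illustrative):

    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
        stmt.execute("CREATE TABLE T (k INTEGER PRIMARY KEY, v1 VARCHAR)");
        stmt.execute("CREATE VIEW V1 (v2 VARCHAR) AS SELECT * FROM T WHERE k > 5");
        stmt.execute("CREATE VIEW V2 (v3 VARCHAR) AS SELECT * FROM V1 WHERE k > 10");
        // before this fix the second-level view V2 survived the drop;
        // now the whole chain is removed
        stmt.execute("DROP TABLE T CASCADE");
    }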



[phoenix] 01/34: PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to client in case of an UpgradeRequiredException

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit f1cdc1715000d800fd23a5c0401a7f51d7728bf1
Author: Chinmay Kulkarni 
AuthorDate: Fri Nov 9 19:22:57 2018 -0800

PHOENIX-5008: CQSI.init should not bubble up RetriableUpgradeException to 
client in case of an UpgradeRequiredException
---
 .../SystemCatalogCreationOnConnectionIT.java   | 122 +
 .../phoenix/query/ConnectionQueryServicesImpl.java |   4 +-
 2 files changed, 101 insertions(+), 25 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
index 689eb20..8fe3b69 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SystemCatalogCreationOnConnectionIT.java
@@ -17,6 +17,24 @@
  */
 package org.apache.phoenix.end2end;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.apache.phoenix.query.BaseTest.generateUniqueName;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -26,6 +44,7 @@ import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.UpgradeRequiredException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver;
 import org.apache.phoenix.jdbc.PhoenixTestDriver;
 import org.apache.phoenix.query.*;
@@ -36,13 +55,6 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.*;
-import java.util.concurrent.TimeoutException;
-
-import static org.junit.Assert.*;
 
 @Category(NeedsOwnMiniClusterTest.class)
 public class SystemCatalogCreationOnConnectionIT {
@@ -57,6 +69,12 @@ public class SystemCatalogCreationOnConnectionIT {
 private static final String PHOENIX_SYSTEM_CATALOG = "SYSTEM.CATALOG";
 private static final String EXECUTE_UPGRADE_COMMAND = "EXECUTE UPGRADE";
 private static final String MODIFIED_MAX_VERSIONS ="5";
+private static final String CREATE_TABLE_STMT = "CREATE TABLE %s"
++ " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2))";
+private static final String SELECT_STMT = "SELECT * FROM %s";
+private static final String DELETE_STMT = "DELETE FROM %s";
+private static final String CREATE_INDEX_STMT = "CREATE INDEX DUMMY_IDX ON %s (K1) INCLUDE (K2)";
+private static final String UPSERT_STMT = "UPSERT INTO %s VALUES ('A', 'B')";
 
 private static final Set<String> PHOENIX_SYSTEM_TABLES = new HashSet<>(Arrays.asList(
   "SYSTEM.CATALOG", "SYSTEM.SEQUENCE", "SYSTEM.STATS", "SYSTEM.FUNCTION",
@@ -155,12 +173,8 @@ public class SystemCatalogCreationOnConnectionIT {
 UpgradeUtil.doNotUpgradeOnFirstConnection(propsDoNotUpgradePropSet);
 SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
   new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
-try {
-driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
-fail("Client should not be able to create SYSTEM.CATALOG since we set the doNotUpgrade property");
-} catch (Exception e) {
-assertTrue(e instanceof UpgradeRequiredException);
-}
+
+driver.getConnectionQueryServices(getJdbcUrl(), propsDoNotUpgradePropSet);
 hbaseTables = getHBaseTables();
 assertFalse(hbaseTables.contains(PHOENIX_SYSTEM_CATALOG) || hbaseTables.contains(PHOENIX_NAMESPACE_MAPPED_SYSTEM_CATALOG));
 assertTrue(hbaseTables.size() == 0);
@@ -416,6 +430,70 @@ public class SystemCatalogCreationOnConnectionIT {
 assertEquals(Integer.parseInt(MODIFIED_MAX_VERSIONS), verifyModificationTableMetadata(driver, PHOENIX_SYSTEM_CATALOG));
 }
 
+// Test the case when an end-user uses th

[phoenix] 04/34: PHOENIX-4750 Resolve server customizers and provide them to Avatica

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 567d8bbad2522fb816295d742bfce94cd534bb43
Author: Alex Araujo 
AuthorDate: Mon Jun 4 16:32:10 2018 -0700

PHOENIX-4750 Resolve server customizers and provide them to Avatica

Resolve server customizers on the PQS classpath and provide them to the
HttpServer builder.

Signed-off-by: Josh Elser 
---
 .../org/apache/phoenix/query/QueryServices.java|   1 +
 .../apache/phoenix/query/QueryServicesOptions.java |   1 +
 .../phoenix/end2end/QueryServerTestUtil.java   | 187 +
 .../phoenix/end2end/ServerCustomizersIT.java   | 147 
 .../phoenix/queryserver/server/QueryServer.java|  26 ++-
 .../server/ServerCustomizersFactory.java   |  49 ++
 .../queryserver/server/ServerCustomizersTest.java  |  87 ++
 7 files changed, 496 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 48b7b7f..c7548df 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -252,6 +252,7 @@ public interface QueryServices extends SQLCloseable {
 public static final String QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB = "phoenix.queryserver.spnego.auth.disabled";
 public static final String QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB = "phoenix.queryserver.withRemoteUserExtractor";
 public static final String QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "phoenix.queryserver.remoteUserExtractor.param";
+public static final String QUERY_SERVER_CUSTOMIZERS_ENABLED = "phoenix.queryserver.customizers.enabled";
 public static final String QUERY_SERVER_DISABLE_KERBEROS_LOGIN = "phoenix.queryserver.disable.kerberos.login";
 
 // metadata configs
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 307c5dd..7933ba0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -301,6 +301,7 @@ public class QueryServicesOptions {
 public static final boolean DEFAULT_QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR = false;
 public static final String DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "doAs";
 public static final boolean DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN = false;
+public static final boolean DEFAULT_QUERY_SERVER_CUSTOMIZERS_ENABLED = false;
 
 public static final boolean DEFAULT_RENEW_LEASE_ENABLED = true;
 public static final int DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS =
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java
new file mode 100644
index 000..01f73ae
--- /dev/null
+++ b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import java.io.File;
+import java.security.PrivilegedAction;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.phoenix.query.ConfigurationFactory;
+import org.apache.phoenix.queryserver.client.ThinClientUtil;
+import org.apache.phoenix.queryserver.server.QuerySe
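The new file is cut off above. As a companion sketch, one plausible shape for a pluggable customizer (assuming Avatica's ServerCustomizer is parameterized on Jetty's Server, as the ServerCustomizersIT diff elsewhere in this digest suggests; note that PHOENIX-4755 later adds an AvaticaServerConfiguration parameter to the factory method):

    // Hypothetical factory; enabled via phoenix.queryserver.customizers.enabled=true.
    public class LoggingCustomizersFactory implements ServerCustomizersFactory {
        @Override
        public List<ServerCustomizer<Server>> createServerCustomizers(Configuration conf) {
            return Collections.<ServerCustomizer<Server>>singletonList(
                new ServerCustomizer<Server>() {
                    @Override
                    public void customize(Server server) {
                        // adjust the Jetty Server before PQS starts it,
                        // e.g. tune connectors or add handlers
                    }
                });
        }
    }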

[phoenix] 09/34: PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface for query level metrics logging

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit fa962f434a202c575c73e18e046d6bc3f2318f87
Author: Karan Mehta 
AuthorDate: Fri Aug 17 13:02:08 2018 -0700

PHOENIX-4853 Add sql statement to PhoenixMetricsLog interface for query 
level metrics logging
---
 .../monitoring/PhoenixLoggingMetricsIT.java| 15 +++---
 .../phoenix/jdbc/LoggingPhoenixConnection.java | 16 +++
 .../jdbc/LoggingPhoenixPreparedStatement.java  | 13 +++-
 .../phoenix/jdbc/LoggingPhoenixResultSet.java  | 10 +
 .../phoenix/jdbc/LoggingPhoenixStatement.java  | 24 +-
 .../org/apache/phoenix/jdbc/PhoenixMetricsLog.java |  6 +++---
 6 files changed, 56 insertions(+), 28 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 02640e7..97b2c5d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -43,6 +43,7 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 private String tableName1;
 private String tableName2;
 private LoggingPhoenixConnection loggedConn;
+private String loggedSql;
 
 @Before
 public void beforeTest() throws Exception {
@@ -75,7 +76,10 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 rs.close();
 assertTrue("Read metrics for not found for " + tableName1,
 requestReadMetricsMap.get(tableName1).size() > 0);
+assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
+
 assertTrue("Overall read metrics for not found ", overAllQueryMetricsMap.size() > 0);
+assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
@@ -120,7 +124,10 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 rs.close();
 assertTrue("Read metrics for not found for " + tableName1,
 requestReadMetricsMap.get(tableName1).size() > 0);
+assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
+
 assertTrue("Overall read metrics for not found ", overAllQueryMetricsMap.size() > 0);
+assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
 
 // run UPSERT SELECT to verify mutation metrics are logged
 String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
@@ -155,18 +162,20 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 return new LoggingPhoenixConnection(conn, new PhoenixMetricsLog() {
 @Override
 public void logOverAllReadRequestMetrics(
-Map overAllQueryMetrics) {
+Map overAllQueryMetrics, String sql) {
 overAllQueryMetricsMap.putAll(overAllQueryMetrics);
+loggedSql = sql;
 }
 
 @Override
 public void logRequestReadMetrics(
-Map> requestReadMetrics) {
+Map> requestReadMetrics, String sql) {
 requestReadMetricsMap.putAll(requestReadMetrics);
+loggedSql = sql;
 }
 
 @Override
-public void logWriteMetricsfoForMutations(
+public void logWriteMetricsfoForMutationsSinceLastReset(
 Map> mutationWriteMetrics) {
 mutationWriteMetricsMap.putAll(mutationWriteMetrics);
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
index 9a2e00f..37917e2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
@@ -61,7 +61,7 @@ public class LoggingPhoenixConnection extends DelegateConnection {
 @Override
 public PreparedStatement prepareStatement(String sql) throws SQLException {
 return new LoggingPhoenixPreparedStatement(super.prepareStatement(sql),
-phoenixMetricsLog);
+phoenixMetricsLog, sql);
 }
 
 @Override
@@ -69,40 +69,40 @@ public class LoggingPhoenixConnection extends DelegateConnection {

[phoenix] 14/34: PHOENIX-4781 Create artifact jar so that shaded jar replaces it properly

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 1f41ccc5f025bd15f9a802ee8c67839ab204c10d
Author: Vincent Poon 
AuthorDate: Fri Nov 30 17:55:34 2018 -0800

PHOENIX-4781 Create artifact jar so that shaded jar replaces it properly
---
 phoenix-client/pom.xml | 9 +++--
 phoenix-server/pom.xml | 9 +++--
 2 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/phoenix-client/pom.xml b/phoenix-client/pom.xml
index 53ffc6a..71452bd 100644
--- a/phoenix-client/pom.xml
+++ b/phoenix-client/pom.xml
@@ -57,12 +57,9 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>default-jar</id>
-            <phase>none</phase>
-          </execution>
-        </executions>
+        <configuration>
+          <jarName>phoenix-${project.version}-client</jarName>
+        </configuration>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 3074638..6b584bb 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -61,12 +61,9 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>default-jar</id>
-            <phase>none</phase>
-          </execution>
-        </executions>
+        <configuration>
+          <jarName>phoenix-${project.version}-server</jarName>
+        </configuration>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>



[phoenix] 23/34: PHOENIX-5025 Tool to clean up orphan views (addendum)

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit f741006e7b5e32307cb2636a1a0bcb593116202a
Author: Kadir 
AuthorDate: Wed Dec 12 17:53:38 2018 -0800

PHOENIX-5025 Tool to clean up orphan views (addendum)
---
 .../apache/phoenix/end2end/OrphanViewToolIT.java   | 25 +++---
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 89 +-
 2 files changed, 71 insertions(+), 43 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
index f9a1785..38d4afc 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrphanViewToolIT.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CHILD_LINK_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_TYPE;
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
@@ -27,9 +26,9 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
+import java.io.FileReader;
 import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Paths;
+import java.io.LineNumberReader;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -54,6 +53,7 @@ import org.slf4j.LoggerFactory;
 
 @RunWith(Parameterized.class)
 public class OrphanViewToolIT extends ParallelStatsDisabledIT {
+private static final String SYSTEM_CHILD_LINK_NAME = SYSTEM_CATALOG_NAME;
 private static final Logger LOG = LoggerFactory.getLogger(OrphanViewToolIT.class);
 
 private final boolean isMultiTenant;
@@ -206,9 +206,13 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
     }
 
     private void verifyLineCount(String fileName, long lineCount) throws IOException {
-        if (Files.lines(Paths.get(fileName)).count() != lineCount)
-            LOG.debug(Files.lines(Paths.get(fileName)).count() + " != " + lineCount);
-        assertTrue(Files.lines(Paths.get(fileName)).count() == lineCount);
+        LineNumberReader reader = new LineNumberReader(new FileReader(fileName));
+        while (reader.readLine() != null) {
+        }
+        int count = reader.getLineNumber();
+        if (count != lineCount)
+            LOG.debug(count + " != " + lineCount);
+        assertTrue(count == lineCount);
 }
 
 private void verifyCountQuery(Connection connection, String query, String 
schemaName, long count)
@@ -238,7 +242,6 @@ public class OrphanViewToolIT extends 
ParallelStatsDisabledIT {
 }
 }
 
-
 private void verifyNoChildLink(Connection connection, String viewSchemaName) throws Exception {
     // Verify that there is no link in the system child link table
 verifyCountQuery(connection, countChildLinksQuery, viewSchemaName, 0);
@@ -264,6 +267,7 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 schemaName == null ? "IS NULL" : "= '" + schemaName + "'"));
 connection.commit();
 }
+
 @Test
 public void testDeleteBaseTableRows() throws Exception {
 String baseTableName = generateUniqueName();
@@ -438,7 +442,8 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 }
 }
 
-    public static String[] getArgValues(boolean clean, boolean identify, boolean outputPath, boolean inputPath) {
+    public static String[] getArgValues(boolean clean, boolean identify, boolean outputPath, boolean inputPath)
+            throws InterruptedException {
         final List<String> args = Lists.newArrayList();
 if (outputPath) {
 args.add("-op");
@@ -454,8 +459,10 @@ public class OrphanViewToolIT extends ParallelStatsDisabledIT {
 if (identify) {
 args.add("-i");
 }
+final long ageMs = 2000;
+Thread.sleep(ageMs);
 args.add("-a");
-args.add("0");
+args.add(Long.toString(ageMs));
 return args.toArray(new String[0]);
 }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index a8a30b6..2e0dd0d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -

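Aside from aging the views (the new 2-second sleep paired with the "-a" argument), the addendum replaces Files.lines(), which was opened three times per check and never closed, with a single LineNumberReader pass. The same counting idiom as a self-contained sketch, with try-with-resources added so the reader itself is closed:

    import java.io.FileReader;
    import java.io.IOException;
    import java.io.LineNumberReader;

    public final class LineCount {
        // Drain the reader, then ask it how many lines it advanced past.
        static int countLines(String fileName) throws IOException {
            try (LineNumberReader reader = new LineNumberReader(new FileReader(fileName))) {
                while (reader.readLine() != null) {
                    // readLine() advances the internal line counter
                }
                return reader.getLineNumber();
            }
        }

        public static void main(String[] args) throws IOException {
            System.out.println(countLines(args[0]));
        }
    }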
[phoenix] 28/34: PHOENIX-5188 - IndexedKeyValue should populate KeyValue fields

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit bf79b6faebe7e877b75c8e58bd10ec091142da9f
Author: Geoffrey Jacoby 
AuthorDate: Tue Mar 12 11:17:50 2019 -0700

PHOENIX-5188 - IndexedKeyValue should populate KeyValue fields
---
 .../phoenix/hbase/index/wal/IndexedKeyValue.java   | 25 
 .../regionserver/wal/IndexedKeyValueTest.java  | 67 ++
 2 files changed, 92 insertions(+)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
index b04cf0a..f01dc06 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
@@ -53,6 +53,7 @@ public class IndexedKeyValue extends KeyValue {
 public IndexedKeyValue() {}
 
 public IndexedKeyValue(byte[] bs, Mutation mutation) {
+super(mutation.getRow(), 0, mutation.getRow().length);
 this.indexTableName = new ImmutableBytesPtr(bs);
 this.mutation = mutation;
 this.hashCode = calcHashCode(indexTableName, mutation);
@@ -117,6 +118,24 @@ public class IndexedKeyValue extends KeyValue {
 }
 
 @Override
+public int getRowOffset() {
+return this.offset;
+}
+
+@Override
+public short getRowLength() {
+return (short) this.length;
+}
+
+@Override
+public int getKeyLength(){
+//normally the key is row key + other key fields such as timestamp,
+        // but those aren't defined here because a Mutation can contain multiple,
+// so we just return the length of the row key
+return this.length;
+}
+
+@Override
 public String toString() {
         return "IndexWrite:\n\ttable: " + indexTableName + "\n\tmutation:" + mutation;
 }
@@ -179,6 +198,12 @@ public class IndexedKeyValue extends KeyValue {
 MutationProto mProto = MutationProto.parseFrom(mutationData);
         this.mutation = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto);
 this.hashCode = calcHashCode(indexTableName, mutation);
+if (mutation != null){
+bytes = mutation.getRow();
+offset = 0;
+length = bytes.length;
+}
+
 }
 
 public boolean getBatchFinished() {
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java
new file mode 100644
index 0000000..7f34fcd
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/hadoop/hbase/regionserver/wal/IndexedKeyValueTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
+import org.apache.phoenix.hbase.index.wal.KeyValueCodec;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+
+
+public class IndexedKeyValueTest {
+
+@Test
+public void testIndexedKeyValuePopulatesKVFields() throws Exception {
+byte[] row = Bytes.toBytes("foo");
+byte[] tableNameBytes = Bytes.toBytes("MyTableName");
+Mutation mutation = new Put(row);
+        IndexedKeyValue indexedKeyValue = new IndexedKeyValue(tableNameBytes, mutation);
+        testIndexedKeyValueHelper(indexedKeyValue, row, tableNameBytes, mutation);
+
+        //now serialize the IndexedKeyValue and make sure the deserialized copy also
+        //has all the right fields
+ByteArrayOutputStream baos = new ByteArrayOutputStream(

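The test body is truncated above; from the commit, the remainder round-trips the IndexedKeyValue through the WAL codec and re-checks the fields. A sketch of that round-trip, assuming the KeyValueCodec.write/readKeyValue pair that Phoenix's indexing WAL code uses (assertion style simplified):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.hbase.index.wal.IndexedKeyValue;
    import org.apache.phoenix.hbase.index.wal.KeyValueCodec;
    import org.junit.Assert;

    public class IndexedKeyValueRoundTrip {
        public static void main(String[] args) throws Exception {
            byte[] row = Bytes.toBytes("foo");
            IndexedKeyValue ikv = new IndexedKeyValue(Bytes.toBytes("MyTableName"), new Put(row));

            // Serialize, then deserialize through the codec.
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            KeyValueCodec.write(new DataOutputStream(baos), ikv);
            IndexedKeyValue copy = (IndexedKeyValue) KeyValueCodec.readKeyValue(
                    new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));

            // The deserialized copy must expose the same KeyValue row fields
            // that the PHOENIX-5188 constructor/readFields changes populate.
            Assert.assertEquals(0, copy.getRowOffset());
            Assert.assertEquals(row.length, copy.getRowLength());
        }
    }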
[phoenix] 19/34: PHOENIX-4993 close cache connections when region server is going down

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 495448e0a99732aa2bee6810505b2bea9046faac
Author: Kiran Kumar Maturi 
AuthorDate: Thu Jan 17 10:32:49 2019 +0530

PHOENIX-4993 close cache connections when region server is going down
---
 .../java/org/apache/phoenix/util/ServerUtil.java   |  12 ++-
 .../CoprocessorHConnectionTableFactoryTest.java| 119 +
 2 files changed, 130 insertions(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
index 09701c5..a8170ce 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ServerUtil.java
@@ -316,7 +316,10 @@ public class ServerUtil {
 
 @Override
 public void shutdown() {
-            ConnectionFactory.shutdown();
+            // close the connections when region server is going down
+            if (this.server.isStopping() || this.server.isStopped() || this.server.isAborted()) {
+                ConnectionFactory.shutdown();
+            }
 }
 
 @Override
@@ -383,6 +386,13 @@ public class ServerUtil {
 return conf;
 }
 }
+
+/**
+ * Added for testing
+ */
+public static int getConnectionsCount() {
+  return connections.size();
+}
 }
 
 public static Configuration getCompactionConfig(Configuration conf) {
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java b/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
new file mode 100644
index 0000000..a757780
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryTest.java
@@ -0,0 +1,119 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.phoenix.util;
+
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/*
+ * This test is wrt to https://issues.apache.org/jira/browse/PHOENIX-4993. Test checks 1. region
+ * close should not close the shared connections 2. region server close should close the shared
+ * connections
+ */
+public class CoprocessorHConnectionTableFactoryTest extends BaseUniqueNamesOwnClusterIT {
+  private static String ORG_PREFIX = "ORG";
+  private static final Log LOG = LogFactory.getLog(CoprocessorHConnectionTableFactoryTest.class);
+
+  @BeforeClass
+  public static final void doSetup() throws Exception {
+
+setUpTestDriver(ReadOnlyProps.EMPTY_PROPS);
+
+  }
+
+  static String getOrgId(long id) {
+return ORG_PREFIX + "-" + id;
+  }
+
+  static String getRandomOrgId(int maxOrgId) {
+return getOrgId(Math.round(Math.random() * maxOrgId));
+  }
+
+  static void writeToTable(String tableName, Connection conn, int maxOrgId) throws SQLException {
+try {
+
+  String orgId = getRandomOrgId(maxOrgId);
+  Statement stmt = conn.createStatement();
+  for (int i = 0; i < 10; i++) {
+        stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES('" + orgId + "'," + i + ","
+            + (i + 1) + "," + (i + 2) + ")");
+
+  }
+  conn.commit();
+} catch (Exception e) {
+  LOG.error("Client side ex

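The essential fix is the guard in shutdown(): that hook also runs when a single region closes, and tearing down the shared connection cache then would starve the other regions. The shape of the check as a compilable sketch; ServerLifecycle and the Runnable stand in for the region server handle and ConnectionFactory.shutdown(), and are illustrative only:

    // Illustrative interface standing in for the region server's lifecycle view.
    interface ServerLifecycle {
        boolean isStopping();
        boolean isStopped();
        boolean isAborted();
    }

    final class GuardedShutdown {
        // Close shared resources only when the whole server is going down,
        // not when a single region is closed.
        static void shutdownIfServerStopping(ServerLifecycle server, Runnable closeSharedConnections) {
            if (server.isStopping() || server.isStopped() || server.isAborted()) {
                closeSharedConnections.run();
            }
        }
    }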
[phoenix] 07/34: PHOENIX-4834 PhoenixMetricsLog interface methods should not depend on specific logger

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 25618522445ae28ff978ef25b50b59687c1ac705
Author: Karan Mehta 
AuthorDate: Tue Aug 7 15:47:33 2018 -0700

PHOENIX-4834 PhoenixMetricsLog interface methods should not depend on 
specific logger
---
 .../org/apache/phoenix/monitoring/PhoenixMetricsIT.java |  9 -
 .../org/apache/phoenix/jdbc/LoggingPhoenixConnection.java   | 11 ++-
 .../org/apache/phoenix/jdbc/LoggingPhoenixResultSet.java|  7 ++-
 .../java/org/apache/phoenix/jdbc/PhoenixMetricsLog.java | 13 ++---
 4 files changed, 18 insertions(+), 22 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index f13391f..4c5c592 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -76,7 +76,6 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
-import org.slf4j.Logger;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -1046,25 +1045,25 @@ public class PhoenixMetricsIT extends BaseUniqueNamesOwnClusterIT {
         LoggingPhoenixConnection protectedConn =
                 new LoggingPhoenixConnection(conn, new PhoenixMetricsLog() {
                     @Override
-                    public void logOverAllReadRequestMetrics(Logger logger,
+                    public void logOverAllReadRequestMetrics(
                             Map<MetricType, Long> overAllQueryMetrics) {
                         overAllQueryMetricsMap.putAll(overAllQueryMetrics);
                     }
 
                     @Override
-                    public void logRequestReadMetrics(Logger logger,
+                    public void logRequestReadMetrics(
                             Map<String, Map<MetricType, Long>> requestReadMetrics) {
                         requestReadMetricsMap.putAll(requestReadMetrics);
                     }
 
                     @Override
-                    public void logWriteMetricsfoForMutations(Logger logger,
+                    public void logWriteMetricsfoForMutations(
                             Map<String, Map<MetricType, Long>> mutationWriteMetrics) {
                         mutationWriteMetricsMap.putAll(mutationWriteMetrics);
                     }
 
                     @Override
-                    public void logReadMetricInfoForMutationsSinceLastReset(Logger logger,
+                    public void logReadMetricInfoForMutationsSinceLastReset(
                             Map<String, Map<MetricType, Long>> mutationReadMetrics) {
                         mutationReadMetricsMap.putAll(mutationReadMetrics);
                     }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
index e1b5dee..d98da83 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/LoggingPhoenixConnection.java
@@ -23,12 +23,9 @@ import java.sql.SQLException;
 import java.sql.Statement;
 
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 public class LoggingPhoenixConnection extends DelegateConnection {
 
-    private static final Logger logger = LoggerFactory.getLogger(LoggingPhoenixResultSet.class);
 private PhoenixMetricsLog phoenixMetricsLog;
 
 public LoggingPhoenixConnection(Connection conn,
@@ -37,6 +34,10 @@ public class LoggingPhoenixConnection extends DelegateConnection {
 this.phoenixMetricsLog = phoenixMetricsLog;
 }
 
+public PhoenixMetricsLog getPhoenixMetricsLog() {
+return phoenixMetricsLog;
+}
+
 @Override
 public Statement createStatement() throws SQLException {
 return new LoggingPhoenixStatement(super.createStatement(), 
phoenixMetricsLog);
@@ -101,8 +102,8 @@ public class LoggingPhoenixConnection extends DelegateConnection {
 @Override
 public void commit() throws SQLException {
 super.commit();
-        phoenixMetricsLog.logWriteMetricsfoForMutations(logger, PhoenixRuntime.getWriteMetricInfoForMutationsSinceLastReset(conn));
-        phoenixMetricsLog.logReadMetricInfoForMutationsSinceLastReset(logger, PhoenixRuntime.getReadMetricInfoForMutationsSinceLastReset(conn));
+        phoenixMetricsLog.logWriteMetricsfoForMutations(PhoenixRuntime.getWriteMetricInfoForMutationsSinceLastReset(conn));
+        phoenixMetricsLog.logReadMetricInfoForMutationsSinceLastReset(PhoenixRuntime.getReadMetricInfoForMutationsSinceLastReset(conn));
 PhoenixRuntime.resetMet

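With the Logger parameter gone from the interface, each implementation now owns its logging destination. A minimal sketch against the signatures as of this commit (class name illustrative):

    import java.util.Map;

    import org.apache.phoenix.jdbc.PhoenixMetricsLog;
    import org.apache.phoenix.monitoring.MetricType;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class Slf4jMetricsLog implements PhoenixMetricsLog {
        // The implementation, not the interface, decides where metrics go.
        private static final Logger LOG = LoggerFactory.getLogger(Slf4jMetricsLog.class);

        @Override
        public void logOverAllReadRequestMetrics(Map<MetricType, Long> overAllQueryMetrics) {
            LOG.info("overall read metrics: {}", overAllQueryMetrics);
        }

        @Override
        public void logRequestReadMetrics(Map<String, Map<MetricType, Long>> requestReadMetrics) {
            LOG.info("per-table read metrics: {}", requestReadMetrics);
        }

        @Override
        public void logWriteMetricsfoForMutations(Map<String, Map<MetricType, Long>> mutationWriteMetrics) {
            LOG.info("mutation write metrics: {}", mutationWriteMetrics);
        }

        @Override
        public void logReadMetricInfoForMutationsSinceLastReset(Map<String, Map<MetricType, Long>> mutationReadMetrics) {
            LOG.info("mutation read metrics: {}", mutationReadMetrics);
        }
    }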
[phoenix] 20/34: Add tenantId param to IndexTool

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit e41a96c551d6753a7003dc63ea16251f23128950
Author: Gokcen Iskender 
AuthorDate: Mon Feb 11 12:58:53 2019 -0800

Add tenantId param to IndexTool

Signed-off-by: Geoffrey Jacoby 
---
 .../org/apache/phoenix/end2end/IndexToolIT.java| 113 -
 .../apache/phoenix/mapreduce/index/IndexTool.java  |  43 +---
 2 files changed, 139 insertions(+), 17 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
index a120aaa..9d6f881 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolIT.java
@@ -40,15 +40,19 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.index.IndexTool;
+import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -73,14 +77,16 @@ public class IndexToolIT extends ParallelStatsEnabledIT {
 private final String tableDDLOptions;
 private final boolean mutable;
 private final boolean useSnapshot;
+private final boolean useTenantId;
 
     public IndexToolIT(boolean transactional, boolean mutable, boolean localIndex,
-boolean directApi, boolean useSnapshot) {
+boolean directApi, boolean useSnapshot, boolean useTenantId) {
 this.localIndex = localIndex;
 this.transactional = transactional;
 this.directApi = directApi;
 this.mutable = mutable;
 this.useSnapshot = useSnapshot;
+this.useTenantId = useTenantId;
 StringBuilder optionBuilder = new StringBuilder();
 if (!mutable) {
 optionBuilder.append(" IMMUTABLE_ROWS=true ");
@@ -117,12 +123,14 @@ public class IndexToolIT extends ParallelStatsEnabledIT {
 for (boolean localIndex : Booleans) {
 for (boolean directApi : Booleans) {
 for (boolean useSnapshot : Booleans) {
-                        list.add(new Boolean[] { transactional, mutable, localIndex, directApi, useSnapshot });
+                        list.add(new Boolean[] { transactional, mutable, localIndex, directApi, useSnapshot, false});
 }
 }
 }
 }
 }
+// Add the usetenantId
+list.add(new Boolean[] { false, false, false, true, false, true});
 return list;
 }
 
@@ -221,6 +229,90 @@ public class IndexToolIT extends ParallelStatsEnabledIT {
 }
 
 @Test
+public void testIndexToolWithTenantId() throws Exception {
+if (!useTenantId) { return;}
+String tenantId = generateUniqueName();
+String schemaName = generateUniqueName();
+String dataTableName = generateUniqueName();
+String viewTenantName = generateUniqueName();
+String indexNameGlobal = generateUniqueName();
+String indexNameTenant = generateUniqueName();
+String viewIndexTableName = "_IDX_" + dataTableName;
+
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection connGlobal = DriverManager.getConnection(getUrl(), props);
+props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+Connection connTenant = DriverManager.getConnection(getUrl(), props);
+        String createTblStr = "CREATE TABLE %s (TENANT_ID VARCHAR(15) NOT NULL,ID INTEGER NOT NULL"
+                + ", NAME VARCHAR, CONSTRAINT PK_1 PRIMARY KEY (TENANT_ID, ID)) MULTI_TENANT=true";
+String createViewStr = "CREATE VIEW %s AS SELECT * FROM %s";
+
+        String upsertQueryStr = "UPSERT INTO %s (TENANT_ID, ID, NAME) VALUES('%s' , %d, '%s')";
+String createIndexStr = "CREATE INDEX %s ON %s (NAME) ";
+
+try {
+            String tableStmtGlobal = String.format(createTblStr, dataTableName);
+connGl

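The test setup is cut off above; for orientation, here is the schema it builds written out as plain JDBC (URL and names illustrative; the tenant connection differs only in carrying the TenantId property, and the tenant view index is what the new IndexTool tenant option rebuilds):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public final class TenantIndexSetup {
        public static void main(String[] args) throws Exception {
            String url = "jdbc:phoenix:localhost"; // illustrative
            Properties props = new Properties();

            // Global connection: multi-tenant base table plus a global index.
            try (Connection global = DriverManager.getConnection(url, props)) {
                global.createStatement().execute(
                    "CREATE TABLE T (TENANT_ID VARCHAR(15) NOT NULL, ID INTEGER NOT NULL,"
                    + " NAME VARCHAR, CONSTRAINT PK_1 PRIMARY KEY (TENANT_ID, ID)) MULTI_TENANT=true");
                global.createStatement().execute("CREATE INDEX G_IDX ON T (NAME)");
            }

            // Tenant connection: view over the base table plus a tenant view index.
            props.setProperty("TenantId", "tenant1");
            try (Connection tenant = DriverManager.getConnection(url, props)) {
                tenant.createStatement().execute("CREATE VIEW V AS SELECT * FROM T");
                tenant.createStatement().execute("CREATE INDEX V_IDX ON V (NAME)");
                tenant.commit();
            }
        }
    }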
[phoenix] 13/34: PHOENIX-4989 Include disruptor jar in shaded dependency

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit a9608bf7fbf85e063bb7561b367a4c0382d81c82
Author: Aman Poonia 
AuthorDate: Tue Oct 30 13:57:52 2018 -0700

PHOENIX-4989 Include disruptor jar in shaded dependency
---
 phoenix-server/pom.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index 102f074..3074638 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -139,6 +139,7 @@
                   <include>com.ibm.icu:icu4j</include>
                   <include>com.ibm.icu:icu4j-charset</include>
                   <include>com.ibm.icu:icu4j-localespi</include>
+                  <include>com.lmax:disruptor</include>
 
   
 org.apache.phoenix:phoenix-server



[phoenix] 04/34: PHOENIX-4750 Resolve server customizers and provide them to Avatica

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 28fc50efe215354e4d2fc9974273f380fb1a9841
Author: Alex Araujo 
AuthorDate: Mon Jun 4 16:32:10 2018 -0700

PHOENIX-4750 Resolve server customizers and provide them to Avatica

Resolve server customizers on the PQS classpath and provide them to the
HttpServer builder.

Signed-off-by: Josh Elser 
---
 .../org/apache/phoenix/query/QueryServices.java|   1 +
 .../apache/phoenix/query/QueryServicesOptions.java |   1 +
 .../phoenix/end2end/QueryServerTestUtil.java   | 187 +
 .../phoenix/end2end/ServerCustomizersIT.java   | 147 
 .../phoenix/queryserver/server/QueryServer.java|  26 ++-
 .../server/ServerCustomizersFactory.java   |  49 ++
 .../queryserver/server/ServerCustomizersTest.java  |  87 ++
 7 files changed, 496 insertions(+), 2 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 48b7b7f..c7548df 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -252,6 +252,7 @@ public interface QueryServices extends SQLCloseable {
     public static final String QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB = "phoenix.queryserver.spnego.auth.disabled";
     public static final String QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB = "phoenix.queryserver.withRemoteUserExtractor";
     public static final String QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "phoenix.queryserver.remoteUserExtractor.param";
+    public static final String QUERY_SERVER_CUSTOMIZERS_ENABLED = "phoenix.queryserver.customizers.enabled";
     public static final String QUERY_SERVER_DISABLE_KERBEROS_LOGIN = "phoenix.queryserver.disable.kerberos.login";
 // metadata configs
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 307c5dd..7933ba0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -301,6 +301,7 @@ public class QueryServicesOptions {
     public static final boolean DEFAULT_QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR = false;
     public static final String DEFAULT_QUERY_SERVER_REMOTEUSEREXTRACTOR_PARAM = "doAs";
     public static final boolean DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN = false;
+    public static final boolean DEFAULT_QUERY_SERVER_CUSTOMIZERS_ENABLED = false;
 
 public static final boolean DEFAULT_RENEW_LEASE_ENABLED = true;
     public static final int DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS =
diff --git a/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java
new file mode 100644
index 0000000..01f73ae
--- /dev/null
+++ b/phoenix-queryserver/src/it/java/org/apache/phoenix/end2end/QueryServerTestUtil.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import java.io.File;
+import java.security.PrivilegedAction;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.phoenix.query.ConfigurationFactory;
+import org.apache.phoenix.queryserver.client.ThinClientUtil;
+import org.apache.phoenix.queryserver.server.QuerySe

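PQS resolves these customizers from the classpath and hands them to Avatica's HttpServer builder. A minimal sketch of one, assuming Avatica's org.apache.calcite.avatica.server.ServerCustomizer interface parameterized on the Jetty Server (the thread-pool tweak is just an example):

    import org.apache.calcite.avatica.server.ServerCustomizer;
    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.util.thread.QueuedThreadPool;

    // Customizes the embedded Jetty server before PQS starts it.
    public class LowResourcesCustomizer implements ServerCustomizer<Server> {
        @Override
        public void customize(Server server) {
            // Example tweak: cap the number of threads Jetty may spin up.
            QueuedThreadPool pool = (QueuedThreadPool) server.getThreadPool();
            pool.setMaxThreads(32);
        }
    }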
[phoenix] 16/34: PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 4.14.1 with hbase-1.x branch in secure setup

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 172149dcf30e66dabbb0ad5b887a060dd475079b
Author: Monani Mihir 
AuthorDate: Fri Dec 14 16:20:17 2018 +0530

PHOENIX-5070 NPE when upgrading Phoenix 4.13.0 to Phoenix 4.14.1 with 
hbase-1.x branch in secure setup
---
 .../java/org/apache/phoenix/coprocessor/PhoenixAccessController.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index 18a293f..befa87d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -406,7 +406,7 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
                 final List<UserPermission> userPermissions = new ArrayList<UserPermission>();
                 try (Connection connection = ConnectionFactory.createConnection(env.getConfiguration())) {
                     // Merge permissions from all accessController coprocessors loaded in memory
-                    for (BaseMasterAndRegionObserver service : accessControllers) {
+                    for (BaseMasterAndRegionObserver service : getAccessControllers()) {
                         // Use AccessControlClient API's if the accessController is an instance of org.apache.hadoop.hbase.security.access.AccessController
                         if (service.getClass().getName().equals(org.apache.hadoop.hbase.security.access.AccessController.class.getName())) {
                             userPermissions.addAll(AccessControlClient.getUserPermissions(connection, tableName.getNameAsString()));


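The one-line fix is easy to miss: the loop previously iterated the accessControllers field directly, which is presumably still null until the lazy-initializing accessor has run, hence the NPE during upgrade; routing through getAccessControllers() initializes it first. The pattern in isolation (names illustrative):

    import java.util.ArrayList;
    import java.util.List;

    class LazyControllers {
        private List<String> accessControllers; // null until first use

        // The accessor performs the lazy initialization; callers must use it
        // instead of touching the field, or they may observe null.
        private synchronized List<String> getAccessControllers() {
            if (accessControllers == null) {
                accessControllers = new ArrayList<>();
                accessControllers.add("org.apache.hadoop.hbase.security.access.AccessController");
            }
            return accessControllers;
        }

        void listControllers() {
            for (String controller : getAccessControllers()) { // not: accessControllers
                System.out.println(controller);
            }
        }
    }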

[phoenix] 30/34: PHOENIX-4822 Ensure the provided timezone is used client-side (Jaanai Zhang)

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 9a153c59bd527de9f857fd6974471cd2d781fd7c
Author: Josh Elser 
AuthorDate: Tue Jul 31 15:53:11 2018 -0400

PHOENIX-4822 Ensure the provided timezone is used client-side (Jaanai Zhang)
---
 .../org/apache/phoenix/end2end/DateTimeIT.java | 77 ++
 .../apache/phoenix/compile/StatementContext.java   | 11 ++--
 .../org/apache/phoenix/jdbc/PhoenixConnection.java |  8 ++-
 .../java/org/apache/phoenix/util/DateUtil.java | 22 ---
 4 files changed, 101 insertions(+), 17 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
index c976114..cc7c7a7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DateTimeIT.java
@@ -54,12 +54,19 @@ import java.text.Format;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
 import java.util.Properties;
+import java.util.TimeZone;
 
+import org.apache.commons.lang.time.FastDateFormat;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PDate;
+import org.apache.phoenix.schema.types.PTime;
 import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.DateUtil;
@@ -1880,4 +1887,74 @@ public class DateTimeIT extends ParallelStatsDisabledIT {
 conn.close();
 }
 }
+
+@Test
+    public void testDateFormatTimeZone() throws Exception {
+        String[] timeZoneIDs = {DateUtil.DEFAULT_TIME_ZONE_ID, "Asia/Yerevan", "Australia/Adelaide", "Asia/Tokyo"};
+for (String timeZoneID : timeZoneIDs) {
+testDateFormatTimeZone(timeZoneID);
+}
+}
+
+public void testDateFormatTimeZone(String timeZoneId) throws Exception {
+Properties props = new Properties();
+props.setProperty("phoenix.query.dateFormatTimeZone", timeZoneId);
+Connection conn1 = DriverManager.getConnection(getUrl(), props);
+
+String tableName = generateUniqueName();
+String ddl = "CREATE TABLE IF NOT EXISTS " + tableName +
+" (k1 INTEGER PRIMARY KEY," +
+" v_date DATE," +
+" v_time TIME," +
+" v_timestamp TIMESTAMP)";
+try {
+conn1.createStatement().execute(ddl);
+
+PhoenixConnection pConn = conn1.unwrap(PhoenixConnection.class);
+verifyTimeZoneIDWithConn(pConn, PDate.INSTANCE, timeZoneId);
+verifyTimeZoneIDWithConn(pConn, PTime.INSTANCE, timeZoneId);
+verifyTimeZoneIDWithConn(pConn, PTimestamp.INSTANCE, timeZoneId);
+
+            Calendar cal = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId));
+            cal.setTime(date);
+            String dateStr = DateUtil.getDateFormatter(DateUtil.DEFAULT_MS_DATE_FORMAT).format(date);
+
+String dml = "UPSERT INTO " + tableName + " VALUES (" +
+"1," +
+"'" + dateStr + "'," +
+"'" + dateStr + "'," +
+"'" + dateStr + "'" +
+")";
+conn1.createStatement().execute(dml);
+conn1.commit();
+
+            PhoenixStatement stmt = conn1.createStatement().unwrap(PhoenixStatement.class);
+            ResultSet rs = stmt.executeQuery("SELECT v_date, v_time, v_timestamp FROM " + tableName);
+
+assertTrue(rs.next());
+            assertEquals(rs.getDate(1).toString(), new Date(cal.getTimeInMillis()).toString());
+            assertEquals(rs.getTime(2).toString(), new Time(cal.getTimeInMillis()).toString());
+assertEquals(rs.getTimestamp(3).getTime(), cal.getTimeInMillis());
+assertFalse(rs.next());
+
+            StatementContext stmtContext = stmt.getQueryPlan().getContext();
+            verifyTimeZoneIDWithFormatter(stmtContext.getDateFormatter(), timeZoneId);
+            verifyTimeZoneIDWithFormatter(stmtContext.getTimeFormatter(), timeZoneId);
+            verifyTimeZoneIDWithFormatter(stmtContext.getTimestampFormatter(), timeZoneId);
+
+stmt.close();
+} finally {
+  

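The knob under test is the phoenix.query.dateFormatTimeZone connection property. A minimal client-side usage sketch (URL illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.util.Properties;

    public final class TimeZoneExample {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Date/time strings are now interpreted in this zone on the
            // client instead of defaulting to GMT.
            props.setProperty("phoenix.query.dateFormatTimeZone", "Asia/Tokyo");

            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
                 ResultSet rs = conn.createStatement().executeQuery(
                         "SELECT CURRENT_DATE() FROM SYSTEM.CATALOG LIMIT 1")) {
                while (rs.next()) {
                    System.out.println(rs.getDate(1));
                }
            }
        }
    }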
[phoenix] 15/34: PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE timestamp check for all index

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 61b3880953bfbfcc59a2af2f91dd020f171de0c9
Author: Monani Mihir 
AuthorDate: Fri Dec 14 18:15:55 2018 +0530

PHOENIX-5048 Index Rebuilder does not handle INDEX_STATE timestamp check 
for all index

Signed-off-by: Geoffrey Jacoby 
---
 .../coprocessor/MetaDataRegionObserver.java| 35 +-
 1 file changed, 21 insertions(+), 14 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 4968525..4045d47 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -512,20 +512,27 @@ public class MetaDataRegionObserver extends 
BaseRegionObserver {
String 
indexTableFullName = SchemaUtil.getTableName(

indexPTable.getSchemaName().getString(),

indexPTable.getTableName().getString());
-   if (scanEndTime 
== latestUpperBoundTimestamp) {
-   
IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L, 
latestUpperBoundTimestamp);
-   
batchExecutedPerTableMap.remove(dataPTable.getName());
-LOG.info("Making Index:" + 
indexPTable.getTableName() + " active after rebuilding");
-   } else {
-   // 
Increment timestamp so that client sees updated disable timestamp
-IndexUtil.updateIndexState(conn, 
indexTableFullName, indexPTable.getIndexState(), scanEndTime * 
signOfDisableTimeStamp, latestUpperBoundTimestamp);
-   Long 
noOfBatches = batchExecutedPerTableMap.get(dataPTable.getName());
-   if 
(noOfBatches == null) {
-   
noOfBatches = 0l;
-   }
-   
batchExecutedPerTableMap.put(dataPTable.getName(), ++noOfBatches);
-   
LOG.info("During Round-robin build: Successfully updated index disabled 
timestamp  for "
-   
+ indexTableFullName + " to " + scanEndTime);
+   try {
+   if 
(scanEndTime == latestUpperBoundTimestamp) {
+   
IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L,
+   
latestUpperBoundTimestamp);
+   
batchExecutedPerTableMap.remove(dataPTable.getName());
+   
LOG.info("Making Index:" + indexPTable.getTableName() + " active after 
rebuilding");
+   } else {
+   // 
Increment timestamp so that client sees updated disable timestamp
+   
IndexUtil.updateIndexState(conn, indexTableFullName, 
indexPTable.getIndexState(),
+   
scanEndTime * signOfDisableTimeStamp, latestUpperBoundTimestamp);
+   Long 
noOfBatches = batchExecutedPerTableMap.get(dataPTable.getName());
+   if 
(noOfBatches == null) {
+   
noOfBatches = 0l;
+   }
+

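The visible change is the new try block around each index's state update; the catch falls below the truncation, but the intent per the JIRA title is that a state-timestamp failure on one index no longer derails handling of the rest. The control-flow pattern in isolation (everything here is illustrative):

    import java.util.Arrays;
    import java.util.List;

    class PerIndexErrorHandling {
        // Update every index's state; one bad index must not abort the loop.
        static void updateAll(List<String> indexNames) {
            for (String index : indexNames) {
                try {
                    updateIndexState(index);
                } catch (Exception e) {
                    System.err.println("Unable to update index " + index + ": " + e);
                    // continue with the remaining indexes
                }
            }
        }

        static void updateIndexState(String index) throws Exception {
            if (index.isEmpty()) {
                throw new Exception("bad index");
            }
            System.out.println("updated " + index);
        }

        public static void main(String[] args) {
            updateAll(Arrays.asList("IDX_A", "", "IDX_B"));
        }
    }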
[phoenix] 21/34: PHOENIX-5080 Index becomes Active during Partial Index Rebuilder if Index Failure happens

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 7772f6930cba4a6e4b17f895bf79b0b4aea00161
Author: Monani Mihir 
AuthorDate: Sat Feb 2 11:00:19 2019 +0530

PHOENIX-5080 Index becomes Active during Partial Index Rebuilder if Index 
Failure happens
---
 .../end2end/index/PartialIndexRebuilderIT.java |  66 +++-
 .../coprocessor/BaseScannerRegionObserver.java |   9 +-
 .../UngroupedAggregateRegionObserver.java  |  25 ++-
 .../org/apache/phoenix/execute/MutationState.java  |  14 +-
 .../org/apache/phoenix/hbase/index/Indexer.java|  10 +-
 .../hbase/index/builder/IndexBuildManager.java |   8 +
 .../phoenix/index/PhoenixIndexFailurePolicy.java   |  32 +++-
 .../apache/phoenix/index/PhoenixIndexMetaData.java |   3 +-
 .../java/org/apache/phoenix/query/BaseTest.java| 185 +
 9 files changed, 330 insertions(+), 22 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
index 46443e3..cda282b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/PartialIndexRebuilderIT.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.phoenix.coprocessor.MetaDataRegionObserver;
 import org.apache.phoenix.coprocessor.MetaDataRegionObserver.BuildIndexScheduleTask;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
-import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.execute.CommitException;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -86,6 +85,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
 private static final long REBUILD_PERIOD = 5;
 private static final long REBUILD_INTERVAL = 2000;
 private static RegionCoprocessorEnvironment indexRebuildTaskRegionEnvironment;
+private static Boolean runRebuildOnce = true;
 
 
 @BeforeClass
@@ -125,6 +125,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
         runIndexRebuilderAsync(interval, cancel, Collections.singletonList(table));
     }
     private static void runIndexRebuilderAsync(final int interval, final boolean[] cancel, final List<String> tables) {
+runRebuildOnce = true;
 Thread thread = new Thread(new Runnable() {
 @Override
 public void run() {
@@ -137,6 +138,8 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
 throw new RuntimeException(e);
 } catch (SQLException e) {
 LOG.error(e.getMessage(),e);
+} finally {
+runRebuildOnce = false;
 }
 }
 }
@@ -554,7 +557,7 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
 
 @Override
 public long currentTime() {
-return time;
+return time++;
 }
 }
 
@@ -1068,6 +1071,65 @@ public class PartialIndexRebuilderIT extends BaseUniqueNamesOwnClusterIT {
 }
 }
 
+@Test
+@Repeat(5)
+    public void testIndexActiveIfRegionMovesWhileRebuilding() throws Throwable {
+final MyClock clock = new MyClock(1000);
+EnvironmentEdgeManager.injectEdge(clock);
+String schemaName = generateUniqueName();
+String tableName = generateUniqueName();
+String indexName = generateUniqueName();
+int nThreads = 5;
+int nRows = 50;
+int nIndexValues = 23;
+int batchSize = 200;
+final CountDownLatch doneSignal = new CountDownLatch(nThreads);
+boolean[] cancel = new boolean[1];
+
+        final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+        final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
+try (Connection conn = DriverManager.getConnection(getUrl())) {
+try {
+                conn.createStatement().execute("CREATE TABLE " + fullTableName
+                        + "(k1 INTEGER NOT NULL, k2 INTEGER NOT NULL, v1 INTEGER, "
+                        + "CONSTRAINT pk PRIMARY KEY (k1,k2)) STORE_NULLS=true, VERSIONS=1");
+                conn.createStatement().execute("CREATE INDEX " + indexName + " ON "
+                        + fullTableName + "(v1)");
+conn.commit();
+long disableTS = clock.currentTime();
+            HTableInterface metaTable = conn.unwrap(PhoenixConnection.c

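Note the MyClock change above from "return time;" to "return time++;": every read now advances the injected clock, so timestamp comparisons in the rebuilder cannot tie. The injection pattern as a sketch, assuming Phoenix's EnvironmentEdge/EnvironmentEdgeManager as used by this test:

    import org.apache.phoenix.util.EnvironmentEdge;
    import org.apache.phoenix.util.EnvironmentEdgeManager;

    // A controllable clock: every call to currentTime() returns a strictly
    // increasing value, mirroring the MyClock fix above.
    class MyClock extends EnvironmentEdge {
        private volatile long time;

        MyClock(long start) {
            this.time = start;
        }

        @Override
        public long currentTime() {
            return time++;
        }
    }

    // Usage in a test:
    //   EnvironmentEdgeManager.injectEdge(new MyClock(1000));
    //   ... exercise code that reads the injected clock ...
    //   EnvironmentEdgeManager.reset(); // restore the real clock afterwards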
[phoenix] 10/34: PHOENIX-4854 Make LoggingPhoenixResultSet idempotent when logging metrics

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit f8859c82129fb56f37bc7c83a8a671b1f424cb7f
Author: Karan Mehta 
AuthorDate: Mon Aug 20 10:12:37 2018 -0700

PHOENIX-4854 Make LoggingPhoenixResultSet idempotent when logging metrics
---
 .../monitoring/PhoenixLoggingMetricsIT.java| 49 --
 .../phoenix/jdbc/LoggingPhoenixResultSet.java  | 15 +--
 2 files changed, 38 insertions(+), 26 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
index 97b2c5d..7e56902 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixLoggingMetricsIT.java
@@ -26,6 +26,7 @@ import org.junit.Test;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Map;
 
@@ -44,6 +45,8 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 private String tableName2;
 private LoggingPhoenixConnection loggedConn;
 private String loggedSql;
+private int logOverAllReadRequestMetricsFuncCallCount;
+private int logRequestReadMetricsFuncCallCount;
 
 @Before
 public void beforeTest() throws Exception {
@@ -69,17 +72,7 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 public void testPhoenixMetricsLoggedOnCommit() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
-        Statement stmt = loggedConn.createStatement();
-        ResultSet rs = stmt.executeQuery(query);
-        while (rs.next()) {
-        }
-        rs.close();
-        assertTrue("Read metrics for not found for " + tableName1,
-                requestReadMetricsMap.get(tableName1).size() > 0);
-        assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
-
-        assertTrue("Overall read metrics for not found ", overAllQueryMetricsMap.size() > 0);
-        assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
+        verifyQueryLevelMetricsLogging(query);
 
         // run UPSERT SELECT to verify mutation metrics are logged
         String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
@@ -117,17 +110,7 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 public void testPhoenixMetricsLoggedOnClose() throws Exception {
 // run SELECT to verify read metrics are logged
 String query = "SELECT * FROM " + tableName1;
-        Statement stmt = loggedConn.createStatement();
-        ResultSet rs = stmt.executeQuery(query);
-        while (rs.next()) {
-        }
-        rs.close();
-        assertTrue("Read metrics for not found for " + tableName1,
-                requestReadMetricsMap.get(tableName1).size() > 0);
-        assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
-
-        assertTrue("Overall read metrics for not found ", overAllQueryMetricsMap.size() > 0);
-        assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
+        verifyQueryLevelMetricsLogging(query);
 
         // run UPSERT SELECT to verify mutation metrics are logged
         String upsertSelect = "UPSERT INTO " + tableName2 + " SELECT * FROM " + tableName1;
@@ -151,6 +134,26 @@ public class PhoenixLoggingMetricsIT extends BasePhoenixMetricsIT {
 mutationReadMetricsMap.size() == 0);
 }
 
+    private void verifyQueryLevelMetricsLogging(String query) throws SQLException {
+        Statement stmt = loggedConn.createStatement();
+        ResultSet rs = stmt.executeQuery(query);
+        while (rs.next()) {
+        }
+        rs.close();
+        assertTrue("Read metrics for not found for " + tableName1,
+                requestReadMetricsMap.get(tableName1).size() > 0);
+        assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
+        assertTrue(logOverAllReadRequestMetricsFuncCallCount == 1);
+        assertTrue(logRequestReadMetricsFuncCallCount == 1);
+
+        assertTrue("Overall read metrics for not found ", overAllQueryMetricsMap.size() > 0);
+        assertTrue("Logged query doesn't match actual query", loggedSql.equals(query));
+
+        rs.close();
+        assertTrue(logOverAllReadRequestMetricsFuncCallCount == 1);
+        assertTrue(logRequestReadMetricsFuncCallCount

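The idempotence under test: closing a LoggingPhoenixResultSet a second time must not log query-level metrics again. The standard guard, reduced to a standalone sketch:

    // Minimal sketch of an idempotent close: the second and later calls
    // are no-ops, so metrics are logged exactly once.
    class MetricsLoggingResource implements AutoCloseable {
        private boolean closed = false;
        private int logCalls = 0;

        @Override
        public void close() {
            if (closed) {
                return;       // already logged; do nothing
            }
            logCalls++;       // stand-in for logging the read metrics
            closed = true;
        }

        public static void main(String[] args) {
            MetricsLoggingResource rs = new MetricsLoggingResource();
            rs.close();
            rs.close();       // idempotent: logCalls stays 1
            System.out.println(rs.logCalls == 1);
        }
    }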
[phoenix] 34/34: PHOENIX-5184: HBase and Phoenix connection leaks in Indexing code path, OrphanViewTool and PhoenixConfigurationUtil

2019-04-19 Thread tdsilva
This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.2
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 9e10faae49326fd54561f4b6972d67476bd85963
Author: Chinmay Kulkarni 
AuthorDate: Thu Mar 14 23:16:14 2019 -0700

PHOENIX-5184: HBase and Phoenix connection leaks in Indexing code path, 
OrphanViewTool and PhoenixConfigurationUtil
---
 .../UngroupedAggregateRegionObserver.java  |  6 ++-
 .../hbase/index/write/RecoveryIndexWriter.java | 10 ++--
 .../phoenix/mapreduce/AbstractBulkLoadTool.java| 15 ++
 .../apache/phoenix/mapreduce/OrphanViewTool.java   | 53 ++---
 .../phoenix/mapreduce/PhoenixRecordWriter.java | 18 +--
 .../mapreduce/index/DirectHTableWriter.java| 14 +-
 .../mapreduce/index/IndexScrutinyMapper.java   | 24 --
 .../apache/phoenix/mapreduce/index/IndexTool.java  | 55 --
 .../index/PhoenixIndexImportDirectMapper.java  | 26 +-
 .../mapreduce/index/PhoenixIndexImportMapper.java  | 16 ---
 .../index/PhoenixIndexPartialBuildMapper.java  | 25 ++
 .../mapreduce/util/PhoenixConfigurationUtil.java   | 45 +-
 .../apache/phoenix/parse/DropTableStatement.java   |  4 +-
 13 files changed, 190 insertions(+), 121 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 2eb15a1..f0ce5b2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -817,7 +817,11 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 }
 try {
 if (targetHTable != null) {
-targetHTable.close();
+try {
+targetHTable.close();
+} catch (IOException e) {
+                        logger.error("Closing table: " + targetHTable + " failed: ", e);
+}
 }
 } finally {
 try {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index 35f0a6d..fb9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -26,8 +26,6 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -55,15 +53,13 @@ public class RecoveryIndexWriter extends IndexWriter {
      * Directly specify the {@link IndexCommitter} and {@link IndexFailurePolicy}. Both are expected to be fully setup
      * before calling.
      * 
-     * @param committer
      * @param policy
      * @param env
+     * @param name
      * @throws IOException
-     * @throws ZooKeeperConnectionException
-     * @throws MasterNotRunningException
      */
     public RecoveryIndexWriter(IndexFailurePolicy policy, RegionCoprocessorEnvironment env, String name)
-            throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
+            throws IOException {
 super(new TrackingParallelWriterIndexCommitter(), policy, env, name);
 this.admin = new HBaseAdmin(env.getConfiguration());
 }
@@ -125,7 +121,7 @@ public class RecoveryIndexWriter extends IndexWriter {
 try {
 admin.close();
 } catch (IOException e) {
-// closing silently
+LOG.error("Closing the admin failed: ", e);
 }
 }
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index f717647..4561152 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -17,7 +17,6 @@
  */
 package org.apache.phoenix.mapreduce;
 
-import java.io.IOException;
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
@@ -37,20 +36,17 @@ import org.apache.commons.cli.ParseException;
 import org.apache.co

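The recurring shape of these leak fixes: every acquired resource is released on every path, and a failure to close one must not prevent closing the next (see the targetHTable hunk above). The pattern reduced to a standalone sketch (names illustrative):

    import java.io.Closeable;
    import java.io.IOException;

    final class CloseQuietly {
        // Close a resource without letting its IOException mask other cleanup.
        static void closeAndLog(Closeable resource, String what) {
            if (resource == null) {
                return;
            }
            try {
                resource.close();
            } catch (IOException e) {
                System.err.println("Closing " + what + " failed: " + e);
            }
        }

        public static void main(String[] args) {
            Closeable table = () -> { throw new IOException("boom"); };
            Closeable admin = () -> System.out.println("admin closed");
            // Even though closing the table throws, the admin still gets closed.
            closeAndLog(table, "table");
            closeAndLog(admin, "admin");
        }
    }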