Jenkins build is back to normal : Phoenix-4.x-HBase-1.3 #689

2020-02-18 Thread Apache Jenkins Server
See 




[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5537 Phoenix-4701 made hard coupling between phoenix.log.level and getting request metrics.

2020-02-18 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 5033773  PHOENIX-5537 Phoenix-4701 made hard coupling between 
phoenix.log.level and getting request metrics.
5033773 is described below

commit 5033773d76bfbf3dfc749d8af6119a0acd1c828c
Author: Richard Antal 
AuthorDate: Tue Feb 11 15:09:26 2020 +0100

PHOENIX-5537 Phoenix-4701 made hard coupling between phoenix.log.level and 
getting request metrics.
---
 .../org/apache/phoenix/monitoring/MetricUtil.java  | 15 ++-
 .../phoenix/monitoring/MetricsStopWatch.java   |  5 +++
 .../phoenix/monitoring/OverAllQueryMetrics.java| 24 ++
 .../apache/phoenix/monitoring/MetricUtilTest.java  | 51 ++
 4 files changed, 85 insertions(+), 10 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
index e792c08..1974eb8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
@@ -22,9 +22,20 @@ import 
org.apache.phoenix.monitoring.CombinableMetric.NoOpRequestMetric;
 
 public class MetricUtil {
 
-public static CombinableMetric getCombinableMetric(boolean 
isRequestMetricsEnabled, LogLevel connectionLogLevel, MetricType type) {
-if (!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) { return NoOpRequestMetric.INSTANCE; }
+public static CombinableMetric getCombinableMetric(boolean 
isRequestMetricsEnabled,
+   LogLevel 
connectionLogLevel,
+   MetricType type) {
+if (!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) {
+return NoOpRequestMetric.INSTANCE; }
 return new CombinableMetricImpl(type);
 }
 
+public static MetricsStopWatch getMetricsStopWatch(boolean 
isRequestMetricsEnabled,
+   LogLevel 
connectionLogLevel,
+   MetricType type) {
+if(!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) {
+return new MetricsStopWatch(false); }
+return new MetricsStopWatch(true);
+}
+
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
index ee260a8..a852ca9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
@@ -56,4 +56,9 @@ final class MetricsStopWatch {
 }
 return 0;
 }
+
+@com.google.common.annotations.VisibleForTesting
+final boolean getMetricsEnabled(){
+return isMetricsEnabled;
+}
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
index 9a2f426..6202eee 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
@@ -43,14 +43,22 @@ public class OverAllQueryMetrics {
 private final CombinableMetric cacheRefreshedDueToSplits;
 
 public OverAllQueryMetrics(boolean isRequestMetricsEnabled, LogLevel 
connectionLogLevel) {
-queryWatch = new 
MetricsStopWatch(WALL_CLOCK_TIME_MS.isLoggingEnabled(connectionLogLevel));
-resultSetWatch = new 
MetricsStopWatch(RESULT_SET_TIME_MS.isLoggingEnabled(connectionLogLevel));
-numParallelScans = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
NUM_PARALLEL_SCANS);
-wallClockTimeMS = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
WALL_CLOCK_TIME_MS);
-resultSetTimeMS = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
RESULT_SET_TIME_MS);
-queryTimedOut = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
QUERY_TIMEOUT_COUNTER);
-queryFailed = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
QUERY_FAILED_COUNTER);
-cacheRefreshedDueToSplits = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
CACHE_REFRESH_SPLITS_COUNTER);
+queryWatch = MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, 
connectionLogLevel,
+WALL_CLOCK_TIME_MS);
resultSetWatch = 
MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, connectionLogLevel,

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5537 Phoenix-4701 made hard coupling between phoenix.log.level and getting request metrics.

2020-02-18 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 70ccf2f  PHOENIX-5537 Phoenix-4701 made hard coupling between 
phoenix.log.level and getting request metrics.
70ccf2f is described below

commit 70ccf2fd7139c2cbe3c4a1e2ef7e3b7bbfa00683
Author: Richard Antal 
AuthorDate: Tue Feb 11 15:09:26 2020 +0100

PHOENIX-5537 Phoenix-4701 made hard coupling between phoenix.log.level and 
getting request metrics.
---
 .../org/apache/phoenix/monitoring/MetricUtil.java  | 15 ++-
 .../phoenix/monitoring/MetricsStopWatch.java   |  5 +++
 .../phoenix/monitoring/OverAllQueryMetrics.java| 24 ++
 .../apache/phoenix/monitoring/MetricUtilTest.java  | 51 ++
 4 files changed, 85 insertions(+), 10 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
index e792c08..1974eb8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
@@ -22,9 +22,20 @@ import 
org.apache.phoenix.monitoring.CombinableMetric.NoOpRequestMetric;
 
 public class MetricUtil {
 
-public static CombinableMetric getCombinableMetric(boolean 
isRequestMetricsEnabled, LogLevel connectionLogLevel, MetricType type) {
-if (!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) { return NoOpRequestMetric.INSTANCE; }
+public static CombinableMetric getCombinableMetric(boolean 
isRequestMetricsEnabled,
+   LogLevel 
connectionLogLevel,
+   MetricType type) {
+if (!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) {
+return NoOpRequestMetric.INSTANCE; }
 return new CombinableMetricImpl(type);
 }
 
+public static MetricsStopWatch getMetricsStopWatch(boolean 
isRequestMetricsEnabled,
+   LogLevel 
connectionLogLevel,
+   MetricType type) {
+if(!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) {
+return new MetricsStopWatch(false); }
+return new MetricsStopWatch(true);
+}
+
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
index ee260a8..a852ca9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
@@ -56,4 +56,9 @@ final class MetricsStopWatch {
 }
 return 0;
 }
+
+@com.google.common.annotations.VisibleForTesting
+final boolean getMetricsEnabled(){
+return isMetricsEnabled;
+}
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
index 9a2f426..6202eee 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
@@ -43,14 +43,22 @@ public class OverAllQueryMetrics {
 private final CombinableMetric cacheRefreshedDueToSplits;
 
 public OverAllQueryMetrics(boolean isRequestMetricsEnabled, LogLevel 
connectionLogLevel) {
-queryWatch = new 
MetricsStopWatch(WALL_CLOCK_TIME_MS.isLoggingEnabled(connectionLogLevel));
-resultSetWatch = new 
MetricsStopWatch(RESULT_SET_TIME_MS.isLoggingEnabled(connectionLogLevel));
-numParallelScans = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
NUM_PARALLEL_SCANS);
-wallClockTimeMS = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
WALL_CLOCK_TIME_MS);
-resultSetTimeMS = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
RESULT_SET_TIME_MS);
-queryTimedOut = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
QUERY_TIMEOUT_COUNTER);
-queryFailed = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
QUERY_FAILED_COUNTER);
-cacheRefreshedDueToSplits = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
CACHE_REFRESH_SPLITS_COUNTER);
+queryWatch = MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, 
connectionLogLevel,
+WALL_CLOCK_TIME_MS);
resultSetWatch = 
MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, connectionLogLevel,

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5537 Phoenix-4701 made hard coupling between phoenix.log.level and getting request metrics.

2020-02-18 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 294eaf0  PHOENIX-5537 Phoenix-4701 made hard coupling between 
phoenix.log.level and getting request metrics.
294eaf0 is described below

commit 294eaf05218a9aeb865e019be81f8c4ce8d39de0
Author: Richard Antal 
AuthorDate: Tue Feb 11 15:09:26 2020 +0100

PHOENIX-5537 Phoenix-4701 made hard coupling between phoenix.log.level and 
getting request metrics.
---
 .../org/apache/phoenix/monitoring/MetricUtil.java  | 15 ++-
 .../phoenix/monitoring/MetricsStopWatch.java   |  5 +++
 .../phoenix/monitoring/OverAllQueryMetrics.java| 24 ++
 .../apache/phoenix/monitoring/MetricUtilTest.java  | 51 ++
 4 files changed, 85 insertions(+), 10 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
index e792c08..1974eb8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
@@ -22,9 +22,20 @@ import 
org.apache.phoenix.monitoring.CombinableMetric.NoOpRequestMetric;
 
 public class MetricUtil {
 
-public static CombinableMetric getCombinableMetric(boolean 
isRequestMetricsEnabled, LogLevel connectionLogLevel, MetricType type) {
-if (!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) { return NoOpRequestMetric.INSTANCE; }
+public static CombinableMetric getCombinableMetric(boolean 
isRequestMetricsEnabled,
+   LogLevel 
connectionLogLevel,
+   MetricType type) {
+if (!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) {
+return NoOpRequestMetric.INSTANCE; }
 return new CombinableMetricImpl(type);
 }
 
+public static MetricsStopWatch getMetricsStopWatch(boolean 
isRequestMetricsEnabled,
+   LogLevel 
connectionLogLevel,
+   MetricType type) {
+if(!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) {
+return new MetricsStopWatch(false); }
+return new MetricsStopWatch(true);
+}
+
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
index ee260a8..a852ca9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
@@ -56,4 +56,9 @@ final class MetricsStopWatch {
 }
 return 0;
 }
+
+@com.google.common.annotations.VisibleForTesting
+final boolean getMetricsEnabled(){
+return isMetricsEnabled;
+}
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
index 9a2f426..6202eee 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
@@ -43,14 +43,22 @@ public class OverAllQueryMetrics {
 private final CombinableMetric cacheRefreshedDueToSplits;
 
 public OverAllQueryMetrics(boolean isRequestMetricsEnabled, LogLevel 
connectionLogLevel) {
-queryWatch = new 
MetricsStopWatch(WALL_CLOCK_TIME_MS.isLoggingEnabled(connectionLogLevel));
-resultSetWatch = new 
MetricsStopWatch(RESULT_SET_TIME_MS.isLoggingEnabled(connectionLogLevel));
-numParallelScans = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
NUM_PARALLEL_SCANS);
-wallClockTimeMS = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
WALL_CLOCK_TIME_MS);
-resultSetTimeMS = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
RESULT_SET_TIME_MS);
-queryTimedOut = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
QUERY_TIMEOUT_COUNTER);
-queryFailed = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
QUERY_FAILED_COUNTER);
-cacheRefreshedDueToSplits = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
CACHE_REFRESH_SPLITS_COUNTER);
+queryWatch = MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, 
connectionLogLevel,
+WALL_CLOCK_TIME_MS);
resultSetWatch = 
MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, connectionLogLevel,

[phoenix] branch master updated: PHOENIX-5537 Phoenix-4701 made hard coupling between phoenix.log.level and getting request metrics.

2020-02-18 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 28de55b  PHOENIX-5537 Phoenix-4701 made hard coupling between 
phoenix.log.level and getting request metrics.
28de55b is described below

commit 28de55b5c79a5fc32dee97a59e5b4f528ca6dce6
Author: Richard Antal 
AuthorDate: Tue Feb 11 15:09:26 2020 +0100

PHOENIX-5537 Phoenix-4701 made hard coupling between phoenix.log.level and 
getting request metrics.
---
 .../org/apache/phoenix/monitoring/MetricUtil.java  | 15 ++-
 .../phoenix/monitoring/MetricsStopWatch.java   |  5 +++
 .../phoenix/monitoring/OverAllQueryMetrics.java| 24 ++
 .../apache/phoenix/monitoring/MetricUtilTest.java  | 51 ++
 4 files changed, 85 insertions(+), 10 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
index e792c08..1974eb8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricUtil.java
@@ -22,9 +22,20 @@ import 
org.apache.phoenix.monitoring.CombinableMetric.NoOpRequestMetric;
 
 public class MetricUtil {
 
-public static CombinableMetric getCombinableMetric(boolean 
isRequestMetricsEnabled, LogLevel connectionLogLevel, MetricType type) {
-if (!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) { return NoOpRequestMetric.INSTANCE; }
+public static CombinableMetric getCombinableMetric(boolean 
isRequestMetricsEnabled,
+   LogLevel 
connectionLogLevel,
+   MetricType type) {
+if (!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) {
+return NoOpRequestMetric.INSTANCE; }
 return new CombinableMetricImpl(type);
 }
 
+public static MetricsStopWatch getMetricsStopWatch(boolean 
isRequestMetricsEnabled,
+   LogLevel 
connectionLogLevel,
+   MetricType type) {
+if(!type.isLoggingEnabled(connectionLogLevel) && 
!isRequestMetricsEnabled) {
+return new MetricsStopWatch(false); }
+return new MetricsStopWatch(true);
+}
+
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
index ee260a8..a852ca9 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/MetricsStopWatch.java
@@ -56,4 +56,9 @@ final class MetricsStopWatch {
 }
 return 0;
 }
+
+@com.google.common.annotations.VisibleForTesting
+final boolean getMetricsEnabled(){
+return isMetricsEnabled;
+}
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
index 9a2f426..6202eee 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/OverAllQueryMetrics.java
@@ -43,14 +43,22 @@ public class OverAllQueryMetrics {
 private final CombinableMetric cacheRefreshedDueToSplits;
 
 public OverAllQueryMetrics(boolean isRequestMetricsEnabled, LogLevel 
connectionLogLevel) {
-queryWatch = new 
MetricsStopWatch(WALL_CLOCK_TIME_MS.isLoggingEnabled(connectionLogLevel));
-resultSetWatch = new 
MetricsStopWatch(RESULT_SET_TIME_MS.isLoggingEnabled(connectionLogLevel));
-numParallelScans = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
NUM_PARALLEL_SCANS);
-wallClockTimeMS = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
WALL_CLOCK_TIME_MS);
-resultSetTimeMS = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
RESULT_SET_TIME_MS);
-queryTimedOut = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
QUERY_TIMEOUT_COUNTER);
-queryFailed = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
QUERY_FAILED_COUNTER);
-cacheRefreshedDueToSplits = 
MetricUtil.getCombinableMetric(isRequestMetricsEnabled,connectionLogLevel, 
CACHE_REFRESH_SPLITS_COUNTER);
+queryWatch = MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, 
connectionLogLevel,
+WALL_CLOCK_TIME_MS);
+resultSetWatch = 
MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, connectionLogLevel,
+

Jenkins build is back to normal : Phoenix-4.x-HBase-1.5 #278

2020-02-18 Thread Apache Jenkins Server
See 




Apache Phoenix - Timeout crawler - Build https://builds.apache.org/job/Phoenix-master/2656/

2020-02-18 Thread Apache Jenkins Server
[...truncated 25 lines...]
Looking at the log, list of test(s) that timed-out:

Build:
https://builds.apache.org/job/Phoenix-master/2656/


Affected test class(es):
Set(['as SYSTEM'])


Build step 'Execute shell' marked build as failure
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any

Build failed in Jenkins: Phoenix | Master #2656

2020-02-18 Thread Apache Jenkins Server
See 


Changes:

[Rajeshbabu Chintaguntla] PHOENIX-5731 Loading bulkload hfiles should not be 
blocked if the upsert


--
[...truncated 128.53 KB...]
[INFO] 
[INFO] ---
[INFO]  T E S T S
[INFO] ---
[INFO] 
[INFO] Results:
[INFO] 
[INFO] Tests run: 0, Failures: 0, Errors: 0, Skipped: 0
[INFO] 
[INFO] 
[INFO] --- maven-failsafe-plugin:2.22.0:integration-test 
(NeedTheirOwnClusterTests) @ phoenix-core ---
[INFO] 
[INFO] ---
[INFO]  T E S T S
[INFO] ---
[INFO] Running 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
[WARNING] Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.002 
s - in 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
[INFO] Running org.apache.phoenix.end2end.ConnectionUtilIT
[INFO] Running 
org.apache.hadoop.hbase.regionserver.wal.WALRecoveryRegionPostOpenIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.845 s 
- in org.apache.hadoop.hbase.regionserver.wal.WALRecoveryRegionPostOpenIT
[INFO] Running org.apache.phoenix.end2end.ConcurrentMutationsExtendedIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 37.756 s 
- in org.apache.phoenix.end2end.ConnectionUtilIT
[INFO] Running org.apache.phoenix.end2end.ContextClassloaderIT
[INFO] Running org.apache.phoenix.end2end.CsvBulkLoadToolIT
[INFO] Running org.apache.phoenix.end2end.CountDistinctCompressionIT
[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.903 s 
- in org.apache.phoenix.end2end.ContextClassloaderIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.361 s 
- in org.apache.phoenix.end2end.CountDistinctCompressionIT
[INFO] Running org.apache.phoenix.end2end.CostBasedDecisionIT
[INFO] Running org.apache.phoenix.end2end.DropSchemaIT
[INFO] Running org.apache.phoenix.end2end.FlappingLocalIndexIT
[INFO] Running org.apache.phoenix.end2end.IndexExtendedIT
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 19.059 s 
- in org.apache.phoenix.end2end.DropSchemaIT
[INFO] Running org.apache.phoenix.end2end.IndexBuildTimestampIT
[INFO] Tests run: 16, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 126.545 
s - in org.apache.phoenix.end2end.CsvBulkLoadToolIT
[INFO] Running org.apache.phoenix.end2end.IndexRebuildTaskIT
[INFO] Running org.apache.phoenix.end2end.IndexScrutinyToolForTenantIT
[INFO] Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 175.279 
s - in org.apache.phoenix.end2end.FlappingLocalIndexIT
[INFO] Tests run: 16, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 171.671 
s - in org.apache.phoenix.end2end.IndexBuildTimestampIT
[INFO] Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 58.646 s 
- in org.apache.phoenix.end2end.IndexScrutinyToolForTenantIT
[INFO] Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 282.455 
s - in org.apache.phoenix.end2end.ConcurrentMutationsExtendedIT
[INFO] Tests run: 64, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 216.857 
s - in org.apache.phoenix.end2end.IndexExtendedIT
[INFO] Running org.apache.phoenix.end2end.IndexScrutinyToolIT
[INFO] Running org.apache.phoenix.end2end.IndexToolForPartialBuildIT
[INFO] Running 
org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.627 s 
- in org.apache.phoenix.end2end.IndexToolForPartialBuildIT
[INFO] Running org.apache.phoenix.end2end.IndexToolIT
[INFO] Running org.apache.phoenix.end2end.MigrateSystemTablesToSystemNamespaceIT
[ERROR] Tests run: 1, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 222.257 
s <<< FAILURE! - in org.apache.phoenix.end2end.IndexRebuildTaskIT
[ERROR] testIndexRebuildTask(org.apache.phoenix.end2end.IndexRebuildTaskIT)  
Time elapsed: 222.257 s  <<< FAILURE!
java.lang.AssertionError: Ran out of time waiting for task state to become 
COMPLETED
at 
org.apache.phoenix.end2end.IndexRebuildTaskIT.waitForTaskState(IndexRebuildTaskIT.java:195)
at 
org.apache.phoenix.end2end.IndexRebuildTaskIT.testIndexRebuildTask(IndexRebuildTaskIT.java:155)

[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 40.903 s 
- in org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
[INFO] Running org.apache.phoenix.end2end.LocalIndexSplitMergeIT
[INFO] Running 
org.apache.phoenix.end2end.OrderByWithServerClientSpoolingDisabledIT
[INFO] Running org.apache.phoenix.end2end.OrderByWithServerMemoryLimitIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.027 s 
- in org.apache.phoenix.end2end.OrderByWithServerMemoryLimitIT
[INFO] Run

Apache Phoenix - Timeout crawler - Build https://builds.apache.org/job/Phoenix-master-matrix/8/

2020-02-18 Thread Apache Jenkins Server
[...truncated 21 lines...]
Looking at the log, list of test(s) that timed-out:

Build:
https://builds.apache.org/job/Phoenix-master-matrix/8/


Affected test class(es):
Set(['as SYSTEM'])


Build step 'Execute shell' marked build as failure
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for differet table-addendum.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 07fba15  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for differet table-addendum.(Rajeshbabu)
07fba15 is described below

commit 07fba1545f5b1887a1c1de70ca79226cad183b61
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:45:28 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for differet table-addendum.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index ea5cb91..e8a20dd 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -523,7 +523,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 if (isDescRowKeyOrderUpgrade || isDelete ||
 (isUpsert && (targetHTable == null ||
-
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+
targetHTable.getName().equals(region.getTableDesc().getTableName(
 || (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for differet table-addendum.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new 18c9594  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for differet table-addendum.(Rajeshbabu)
18c9594 is described below

commit 18c959433cb6dc7a73aaba7925919cfeb0a33f1f
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:45:28 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for differet table-addendum.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index e1962a6..4f21511 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -530,7 +530,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 if (isDescRowKeyOrderUpgrade || isDelete ||
 (isUpsert && (targetHTable == null ||
-
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+
targetHTable.getName().equals(region.getTableDesc().getTableName(
 || (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for differet table-addendum.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 0007e56  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for differet table-addendum.(Rajeshbabu)
0007e56 is described below

commit 0007e56e2075ed1b12283df579bb40d915251247
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:45:28 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for differet table-addendum.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index e1962a6..4f21511 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -530,7 +530,7 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 }
 if (isDescRowKeyOrderUpgrade || isDelete ||
 (isUpsert && (targetHTable == null ||
-
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+
targetHTable.getName().equals(region.getTableDesc().getTableName(
 || (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);



Build failed in Jenkins: Phoenix-4.x-HBase-1.3 #688

2020-02-18 Thread Apache Jenkins Server
See 


Changes:

[Rajeshbabu Chintaguntla] PHOENIX-5731 Loading bulkload hfiles should not be 
blocked if the upsert


--
Started by an SCM change
Started by an SCM change
Running as SYSTEM
[EnvInject] - Loading node environment variables.
Building remotely on H30 (ubuntu) in workspace 

No credentials specified
 > git rev-parse --is-inside-work-tree # timeout=10
Fetching changes from the remote Git repository
 > git config remote.origin.url https://gitbox.apache.org/repos/asf/phoenix.git 
 > # timeout=10
Fetching upstream changes from https://gitbox.apache.org/repos/asf/phoenix.git
 > git --version # timeout=10
 > git fetch --tags --progress -- 
 > https://gitbox.apache.org/repos/asf/phoenix.git 
 > +refs/heads/*:refs/remotes/origin/*
 > git rev-parse origin/4.x-HBase-1.3^{commit} # timeout=10
Checking out Revision 13486032db982687e4d5c1a1ae7246263fd39998 
(origin/4.x-HBase-1.3)
 > git config core.sparsecheckout # timeout=10
 > git checkout -f 13486032db982687e4d5c1a1ae7246263fd39998
Commit message: "PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for differet table.(Rajeshbabu)"
 > git rev-list --no-walk 9e8f14da1157fad32679691c02e1d8164919842c # timeout=10
No emails were triggered.
[EnvInject] - Executing scripts and injecting environment variables after the 
SCM step.
[EnvInject] - Injecting as environment variables the properties content 
MAVEN_OPTS=-Xmx3G

[EnvInject] - Variables injected successfully.
[Phoenix-4.x-HBase-1.3] $ /bin/bash -xe /tmp/jenkins5007256114873339317.sh
+ echo 'DELETING ~/.m2/repository/org/apache/htrace. See 
https://issues.apache.org/jira/browse/PHOENIX-1802'
DELETING ~/.m2/repository/org/apache/htrace. See 
https://issues.apache.org/jira/browse/PHOENIX-1802
+ echo 'CURRENT CONTENT:'
CURRENT CONTENT:
+ ls /home/jenkins/.m2/repository/org/apache/htrace
htrace
htrace-core
htrace-core4
[Phoenix-4.x-HBase-1.3] $ /home/jenkins/tools/maven/latest3/bin/mvn -U clean 
install -Dcheckstyle.skip=true
[INFO] Scanning for projects...
[WARNING] 
[WARNING] Some problems were encountered while building the effective model for 
org.apache.phoenix:phoenix-assembly:pom:4.16.0-HBase-1.3-SNAPSHOT
[WARNING] 'build.plugins.plugin.version' for 
org.codehaus.mojo:exec-maven-plugin is missing. @ line 40, column 15
[WARNING] 
[WARNING] It is highly recommended to fix these problems because they threaten 
the stability of your build.
[WARNING] 
[WARNING] For this reason, future Maven versions might no longer support 
building such malformed projects.
[WARNING] 
[INFO] 
[INFO] Reactor Build Order:
[INFO] 
[INFO] Apache Phoenix [pom]
[INFO] Phoenix Core   [jar]
[INFO] Phoenix - Pherf[jar]
[INFO] Phoenix Client [jar]
[INFO] Phoenix Server [jar]
[INFO] Phoenix Assembly   [pom]
[INFO] Phoenix - Tracing Web Application  [jar]
[INFO] 
[INFO] -< org.apache.phoenix:phoenix >-
[INFO] Building Apache Phoenix 4.16.0-HBase-1.3-SNAPSHOT  [1/7]
[INFO] [ pom ]-
[INFO] 
[INFO] --- maven-clean-plugin:3.1.0:clean (default-clean) @ phoenix ---
[INFO] Deleting 
[INFO] 
[INFO] --- maven-enforcer-plugin:1.4.1:enforce (enforce-maven-version) @ 
phoenix ---
[INFO] 
[INFO] --- maven-checkstyle-plugin:3.1.0:check (validate) @ phoenix ---
[INFO] 
[INFO] --- maven-remote-resources-plugin:1.5:process (process-resource-bundles) 
@ phoenix ---
[INFO] 
[INFO] --- maven-source-plugin:3.0.1:jar-no-fork (attach-sources) @ phoenix ---
[INFO] 
[INFO] --- maven-jar-plugin:3.1.0:test-jar (default) @ phoenix ---
[WARNING] JAR will be empty - no content was marked for inclusion!
[INFO] Building jar: 

[INFO] 
[INFO] --- maven-site-plugin:3.7.1:attach-descriptor (attach-descriptor) @ 
phoenix ---
[INFO] No site descriptor found: nothing to attach.
[INFO] 
[INFO] --- maven-install-plugin:2.5.2:install (default-install) @ phoenix ---
[INFO] Installing 
 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix/4.16.0-HBase-1.3-SNAPSHOT/phoenix-4.16.0-HBase-1.3-SNAPSHOT.pom
[INFO] Installing 


[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 1348603  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for differet table.(Rajeshbabu)
1348603 is described below

commit 13486032db982687e4d5c1a1ae7246263fd39998
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:07:50 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for differet table.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 8c3ac26..ea5cb91 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -521,7 +521,10 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != 
null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
+if (isDescRowKeyOrderUpgrade || isDelete ||
+(isUpsert && (targetHTable == null ||
+
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+|| (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));



Build failed in Jenkins: Phoenix-4.x-HBase-1.5 #277

2020-02-18 Thread Apache Jenkins Server
See 


Changes:

[Rajeshbabu Chintaguntla] PHOENIX-5731 Loading bulkload hfiles should not be 
blocked if the upsert


--
Started by an SCM change
Running as SYSTEM
[EnvInject] - Loading node environment variables.
Building remotely on H27 (ubuntu) in workspace 

No credentials specified
 > git rev-parse --is-inside-work-tree # timeout=10
Fetching changes from the remote Git repository
 > git config remote.origin.url https://gitbox.apache.org/repos/asf/phoenix.git 
 > # timeout=10
Fetching upstream changes from https://gitbox.apache.org/repos/asf/phoenix.git
 > git --version # timeout=10
 > git fetch --tags --progress -- 
 > https://gitbox.apache.org/repos/asf/phoenix.git 
 > +refs/heads/*:refs/remotes/origin/*
 > git rev-parse origin/4.x-HBase-1.5^{commit} # timeout=10
Checking out Revision 6c1a1c731ee66b5859a161284b99c6395ee60e8c 
(origin/4.x-HBase-1.5)
 > git config core.sparsecheckout # timeout=10
 > git checkout -f 6c1a1c731ee66b5859a161284b99c6395ee60e8c
Commit message: "PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for differet table.(Rajeshbabu)"
 > git rev-list --no-walk 1622ef974ccd7d4ecd5d73058a6ff91a18b900c3 # timeout=10
No emails were triggered.
[EnvInject] - Executing scripts and injecting environment variables after the 
SCM step.
[EnvInject] - Injecting as environment variables the properties content 
MAVEN_OPTS=-Xmx3G

[EnvInject] - Variables injected successfully.
[Phoenix-4.x-HBase-1.5] $ /bin/bash -xe /tmp/jenkins1516043682936190129.sh
[Phoenix-4.x-HBase-1.5] $ /home/jenkins/tools/maven/latest3/bin/mvn -U clean 
install -Dcheckstyle.skip=true
[INFO] Scanning for projects...
[WARNING] 
[WARNING] Some problems were encountered while building the effective model for 
org.apache.phoenix:phoenix-assembly:pom:4.16.0-HBase-1.5-SNAPSHOT
[WARNING] 'build.plugins.plugin.version' for 
org.codehaus.mojo:exec-maven-plugin is missing. @ line 40, column 15
[WARNING] 
[WARNING] It is highly recommended to fix these problems because they threaten 
the stability of your build.
[WARNING] 
[WARNING] For this reason, future Maven versions might no longer support 
building such malformed projects.
[WARNING] 
[INFO] 
[INFO] Reactor Build Order:
[INFO] 
[INFO] Apache Phoenix [pom]
[INFO] Phoenix Core   [jar]
[INFO] Phoenix - Pherf[jar]
[INFO] Phoenix Client [jar]
[INFO] Phoenix Server [jar]
[INFO] Phoenix Assembly   [pom]
[INFO] Phoenix - Tracing Web Application  [jar]
[INFO] 
[INFO] -< org.apache.phoenix:phoenix >-
[INFO] Building Apache Phoenix 4.16.0-HBase-1.5-SNAPSHOT  [1/7]
[INFO] [ pom ]-
[INFO] 
[INFO] --- maven-clean-plugin:3.1.0:clean (default-clean) @ phoenix ---
[INFO] Deleting 
[INFO] 
[INFO] --- maven-enforcer-plugin:1.4.1:enforce (enforce-maven-version) @ 
phoenix ---
[INFO] 
[INFO] --- maven-checkstyle-plugin:3.1.0:check (validate) @ phoenix ---
[INFO] 
[INFO] --- maven-remote-resources-plugin:1.5:process (process-resource-bundles) 
@ phoenix ---
[INFO] 
[INFO] --- maven-source-plugin:3.0.1:jar-no-fork (attach-sources) @ phoenix ---
[INFO] 
[INFO] --- maven-jar-plugin:3.1.0:test-jar (default) @ phoenix ---
[WARNING] JAR will be empty - no content was marked for inclusion!
[INFO] Building jar: 

[INFO] 
[INFO] --- maven-site-plugin:3.7.1:attach-descriptor (attach-descriptor) @ 
phoenix ---
[INFO] No site descriptor found: nothing to attach.
[INFO] 
[INFO] --- maven-install-plugin:2.5.2:install (default-install) @ phoenix ---
[INFO] Installing 
 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix/4.16.0-HBase-1.5-SNAPSHOT/phoenix-4.16.0-HBase-1.5-SNAPSHOT.pom
[INFO] Installing 

 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix/4.16.0-HBase-1.5-SNAPSHOT/phoenix-4.16.0-HBase-1.5-SNAPSHOT-tests.jar
[INFO] 
[INFO] --< org.apache.phoenix:phoenix-core >---
[INFO] Building Phoenix Core 4.16.0-HBase-1.5-SNAPSHOT[2/7]
[INFO] --

[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new ee84df2  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for differet table.(Rajeshbabu)
ee84df2 is described below

commit ee84df2a58a5e8479d3d0319b84f301e04c97d9d
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:07:50 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for differet table.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index fbd9d80..e1962a6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -528,7 +528,10 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != 
null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
+if (isDescRowKeyOrderUpgrade || isDelete ||
+(isUpsert && (targetHTable == null ||
+
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+|| (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));



[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 6c1a1c7  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for differet table.(Rajeshbabu)
6c1a1c7 is described below

commit 6c1a1c731ee66b5859a161284b99c6395ee60e8c
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:07:50 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for differet table.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index fbd9d80..e1962a6 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -528,7 +528,10 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != 
null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
+if (isDescRowKeyOrderUpgrade || isDelete ||
+(isUpsert && (targetHTable == null ||
+
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+|| (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));



[phoenix] branch master updated: PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert select happening for different table.(Rajeshbabu)

2020-02-18 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new c80059f  PHOENIX-5731 Loading bulkload hfiles should not be blocked if 
the upsert select happening for differet table.(Rajeshbabu)
c80059f is described below

commit c80059fa883288bcf774daef73329b8e0b2ba760
Author: Rajeshbabu Chintaguntla 
AuthorDate: Wed Feb 19 09:07:50 2020 +0530

PHOENIX-5731 Loading bulkload hfiles should not be blocked if the upsert 
select happening for differet table.(Rajeshbabu)
---
 .../apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index b38e144..d96956c 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -565,7 +565,10 @@ public class UngroupedAggregateRegionObserver extends 
BaseScannerRegionObserver
 if(buildLocalIndex) {
 checkForLocalIndexColumnFamilies(region, indexMaintainers);
 }
-if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != 
null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
+if (isDescRowKeyOrderUpgrade || isDelete ||
+(isUpsert && (targetHTable == null ||
+
targetHTable.getName().equals(region.getTableDescriptor().getTableName(
+|| (deleteCQ != null && deleteCF != null) || emptyCF != null 
|| buildLocalIndex) {
 needToWrite = true;
 maxBatchSize = conf.getInt(MUTATE_BATCH_SIZE_ATTRIB, 
QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
 mutations = new MutationList(Ints.saturatedCast(maxBatchSize + 
maxBatchSize / 10));



Build failed in Jenkins: Phoenix | Master | HBase Profile » 2.0 #8

2020-02-18 Thread Apache Jenkins Server
See 


Changes:

[s.kadam] PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates


--
[...truncated 528.40 KB...]
at 
org.apache.hadoop.hbase.regionserver.HRegion$BatchOperation.visitBatchOperations(HRegion.java:3068)
at 
org.apache.hadoop.hbase.regionserver.HRegion$MutationBatchOperation.checkAndPrepare(HRegion.java:3450)
at 
org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:3887)
at 
org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:3821)
at 
org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:3812)
at 
org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:3826)
at 
org.apache.hadoop.hbase.regionserver.HRegion.doBatchMutate(HRegion.java:4153)
at 
org.apache.hadoop.hbase.regionserver.HRegion.delete(HRegion.java:2907)
at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2840)
at 
org.apache.hadoop.hbase.client.ClientServiceCallable.doMutate(ClientServiceCallable.java:55)
at org.apache.hadoop.hbase.client.HTable$2.rpcCall(HTable.java:498)
at org.apache.hadoop.hbase.client.HTable$2.rpcCall(HTable.java:493)
at 
org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:127)
at 
org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:107)
at org.apache.hadoop.hbase.client.HTable.delete(HTable.java:503)
at 
org.apache.hadoop.hbase.security.access.AccessControlLists.removePermissionRecord(AccessControlLists.java:262)
at 
org.apache.hadoop.hbase.security.access.AccessControlLists.removeUserPermission(AccessControlLists.java:246)
at 
org.apache.hadoop.hbase.security.access.AccessController$8.run(AccessController.java:2123)
at 
org.apache.hadoop.hbase.security.access.AccessController$8.run(AccessController.java:2117)
at java.base/java.security.AccessController.doPrivileged(Native Method)
at java.base/javax.security.auth.Subject.doAs(Subject.java:423)
at 
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1962)
at 
org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:514)
at 
org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:495)
at jdk.internal.reflect.GeneratedMethodAccessor121.invoke(Unknown 
Source)
at 
java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:566)
at org.apache.hadoop.hbase.util.Methods.call(Methods.java:40)
at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:183)
... 11 more

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: 
java.io.IOException: org.apache.hadoop.hbase.security.AccessDeniedException: 
Insufficient permissions (user=regularUser1_N78, scope=hbase:acl, 
family=l:regularUser2_N79, 
params=[table=hbase:acl,family=l:regularUser2_N79],action=WRITE)
at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:185)
at 
org.apache.hadoop.hbase.security.access.AccessController.revoke(AccessController.java:2117)
at 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos$AccessControlService$1.revoke(AccessControlProtos.java:10031)
at 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos$AccessControlService.callMethod(AccessControlProtos.java:10192)
at 
org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8106)
at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2409)
at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2391)
at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:42010)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:413)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:130)
at 
org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:324)
at 
org.apache.hadoop.hbase.ipc.RpcExecutor$Handler.run(RpcExecutor.java:304)
Caused by: org.apache.hadoop.hbase.security.AccessDeniedException: Insufficient 
permissions (user=regularUser1_N78, scope=hbase:acl, 
family=l:regularUser2_N79, 
params=[table=hbase:acl,family=l:regularUser2_N79],action=WRITE)
at 
org.apache.hadoop.hbase.security.access.AccessController.preDelete(AccessController.java:1551)
at 
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$26.call(RegionCoprocessorHost.java:980)
at 
org.apache.hadoop.hbase.regi

Build failed in Jenkins: Phoenix | Master | HBase Profile » 2.1 #8

2020-02-18 Thread Apache Jenkins Server
See 


Changes:

[s.kadam] PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates


--
[...truncated 513.13 KB...]
[WARNING] See http://maven.apache.org/plugins/maven-shade-plugin/
[INFO] Replacing 

 with 

[INFO] 
[INFO] --- maven-install-plugin:2.5.2:install-file (default-install) @ 
phoenix-server ---
[INFO] Installing 

 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-server/5.1.0-SNAPSHOT/phoenix-server-5.1.0-SNAPSHOT.jar
[INFO] Installing 

 to 
/home/jenkins/.m2/repository/org/apache/phoenix/phoenix-server/5.1.0-SNAPSHOT/phoenix-server-5.1.0-SNAPSHOT.pom
[INFO] 
[INFO] --- maven-install-plugin:2.5.2:install (default-install) @ 
phoenix-server ---
[INFO] Skipping artifact installation
[INFO] 
[INFO] < org.apache.phoenix:phoenix-assembly >-
[INFO] Building Phoenix Assembly 5.1.0-SNAPSHOT  [9/10]
[INFO] [ pom ]-
[INFO] 
[INFO] --- maven-clean-plugin:3.1.0:clean (default-clean) @ phoenix-assembly ---
[INFO] 
[INFO] --- maven-enforcer-plugin:3.0.0-M3:enforce (enforce-maven-version) @ 
phoenix-assembly ---
[INFO] 
[INFO] --- maven-enforcer-plugin:3.0.0-M3:enforce (check-hbase-compatibility) @ 
phoenix-assembly ---
[INFO] 
[INFO] --- maven-checkstyle-plugin:3.1.0:check (validate) @ phoenix-assembly ---
[INFO] 
[INFO] --- maven-remote-resources-plugin:1.5:process (process-resource-bundles) 
@ phoenix-assembly ---
[INFO] artifact net.minidev:json-smart: checking for updates from apache release
[INFO] artifact net.minidev:json-smart: checking for updates from central
[INFO] artifact net.minidev:json-smart: checking for updates from 
dynamodb-local-oregon
[INFO] artifact net.minidev:json-smart: checking for updates from 
apache.snapshots.https
[INFO] artifact net.minidev:json-smart: checking for updates from 
repository.jboss.org
[INFO] artifact net.minidev:json-smart: checking for updates from apache release
[INFO] artifact net.minidev:json-smart: checking for updates from central
[INFO] artifact net.minidev:json-smart: checking for updates from 
dynamodb-local-oregon
[INFO] artifact net.minidev:json-smart: checking for updates from 
apache.snapshots.https
[INFO] artifact net.minidev:json-smart: checking for updates from 
repository.jboss.org
[INFO] artifact org.glassfish:javax.el: checking for updates from apache release
[INFO] artifact org.glassfish:javax.el: checking for updates from central
[INFO] artifact org.glassfish:javax.el: checking for updates from 
java.net.Releases
[INFO] artifact net.minidev:json-smart: checking for updates from apache release
[INFO] artifact net.minidev:json-smart: checking for updates from central
[INFO] artifact net.minidev:json-smart: checking for updates from 
dynamodb-local-oregon
[INFO] artifact net.minidev:json-smart: checking for updates from 
apache.snapshots.https
[INFO] artifact net.minidev:json-smart: checking for updates from 
repository.jboss.org
[INFO] artifact org.glassfish:javax.el: checking for updates from apache release
[INFO] artifact org.glassfish:javax.el: checking for updates from central
[INFO] artifact org.glassfish:javax.el: checking for updates from 
java.net.Releases
[INFO] 
[INFO] --- exec-maven-plugin:1.6.0:exec (Symlink to deprecated client jar name) 
@ phoenix-assembly ---
'phoenix-5.1.0-SNAPSHOT-hbase-2.1-client.jar' -> 
'phoenix-client-5.1.0-SNAPSHOT-hbase-2.1.jar'
[INFO] 
[INFO] --- maven-source-plugin:3.0.1:jar-no-fork (attach-sources) @ 
phoenix-assembly ---
[INFO] 
[INFO] --- maven-jar-plugin:3.1.0:test-jar (default) @ phoenix-assembly ---
[INFO] Skipping packaging of the test-jar
[INFO] 
[INFO] --- maven-site-plugin:3.7.1:attach-descriptor (attach-descriptor) @ 
phoenix-assembly ---
[INFO] No site descriptor found: nothing to attach.
[INFO] 
[INFO] --- maven-assembly-plugin:3.0.0:single (package-to-tar) @ 
phoenix-assembly ---
[INFO] Reading assembly descriptor: src/build/package-to-tar-all.xml
[WARNING] The assembly descriptor contains a filesystem-root relative 
reference, which is not cross platform compatible /
[WARNING] The assembly descriptor contains a filesystem-root relative 
reference, which is not cross platform compatible /
[WARNING] The assembly descriptor contains a filesystem-root relative 
reference, which is not cross platform compatible /
[INF

Jenkins build is back to normal : Phoenix | Master #2655

2020-02-18 Thread Apache Jenkins Server
See 




Apache Phoenix - Timeout crawler - Build https://builds.apache.org/job/Phoenix-master/2655/

2020-02-18 Thread Apache Jenkins Server
[...truncated 51 lines...]

Build failed in Jenkins: Phoenix-4.x-HBase-1.3 #687

2020-02-18 Thread Apache Jenkins Server
See 


Changes:

[s.kadam] PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates


--
[...truncated 105.70 KB...]
[INFO] 
[INFO] --- maven-failsafe-plugin:2.22.0:integration-test 
(HBaseManagedTimeTests) @ phoenix-core ---
[INFO] 
[INFO] ---
[INFO]  T E S T S
[INFO] ---
[INFO] 
[INFO] Results:
[INFO] 
[INFO] Tests run: 0, Failures: 0, Errors: 0, Skipped: 0
[INFO] 
[INFO] 
[INFO] --- maven-failsafe-plugin:2.22.0:integration-test 
(NeedTheirOwnClusterTests) @ phoenix-core ---
[INFO] 
[INFO] ---
[INFO]  T E S T S
[INFO] ---
[INFO] Running 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
[WARNING] Tests run: 1, Failures: 0, Errors: 0, Skipped: 1, Time elapsed: 0.004 
s - in 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
[INFO] Running org.apache.phoenix.end2end.ConnectionUtilIT
[INFO] Running 
org.apache.hadoop.hbase.regionserver.wal.WALRecoveryRegionPostOpenIT
[INFO] Running org.apache.phoenix.end2end.ConcurrentMutationsExtendedIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.669 s 
- in org.apache.hadoop.hbase.regionserver.wal.WALRecoveryRegionPostOpenIT
[INFO] Running 
org.apache.phoenix.end2end.ColumnEncodedMutableNonTxStatsCollectorIT
[INFO] Running 
org.apache.phoenix.end2end.ColumnEncodedImmutableNonTxStatsCollectorIT
[INFO] Running 
org.apache.phoenix.end2end.ColumnEncodedImmutableTxStatsCollectorIT
[INFO] Running org.apache.phoenix.end2end.ColumnEncodedMutableTxStatsCollectorIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 54.794 s 
- in org.apache.phoenix.end2end.ConnectionUtilIT
[INFO] Running org.apache.phoenix.end2end.ContextClassloaderIT
[INFO] Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.725 s 
- in org.apache.phoenix.end2end.ContextClassloaderIT
[INFO] Running org.apache.phoenix.end2end.CostBasedDecisionIT
[INFO] Running org.apache.phoenix.end2end.CountDistinctCompressionIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.807 s 
- in org.apache.phoenix.end2end.CountDistinctCompressionIT
[WARNING] Tests run: 28, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 
184.515 s - in 
org.apache.phoenix.end2end.ColumnEncodedMutableNonTxStatsCollectorIT
[WARNING] Tests run: 28, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 
187.651 s - in 
org.apache.phoenix.end2end.ColumnEncodedImmutableNonTxStatsCollectorIT
[WARNING] Tests run: 28, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 
186.609 s - in org.apache.phoenix.end2end.ColumnEncodedMutableTxStatsCollectorIT
[WARNING] Tests run: 28, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 
188.026 s - in 
org.apache.phoenix.end2end.ColumnEncodedImmutableTxStatsCollectorIT
[INFO] Running org.apache.phoenix.end2end.CsvBulkLoadToolIT
[INFO] Running org.apache.phoenix.end2end.DropSchemaIT
[INFO] Running org.apache.phoenix.end2end.IndexBuildTimestampIT
[INFO] Running org.apache.phoenix.end2end.FlappingLocalIndexIT
[INFO] Running org.apache.phoenix.end2end.IndexExtendedIT
[INFO] Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 292.055 
s - in org.apache.phoenix.end2end.ConcurrentMutationsExtendedIT
[INFO] Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 30.599 s 
- in org.apache.phoenix.end2end.DropSchemaIT
[INFO] Running org.apache.phoenix.end2end.IndexRebuildTaskIT
[INFO] Running org.apache.phoenix.end2end.IndexScrutinyToolForTenantIT
[INFO] Tests run: 16, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 158.142 
s - in org.apache.phoenix.end2end.CsvBulkLoadToolIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 31.675 s 
- in org.apache.phoenix.end2end.IndexRebuildTaskIT
[INFO] Running org.apache.phoenix.end2end.IndexScrutinyToolIT
[INFO] Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 193.7 s 
- in org.apache.phoenix.end2end.FlappingLocalIndexIT
[INFO] Tests run: 16, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 195.515 
s - in org.apache.phoenix.end2end.IndexBuildTimestampIT
[INFO] Running org.apache.phoenix.end2end.IndexToolForPartialBuildIT
[INFO] Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 99.058 s 
- in org.apache.phoenix.end2end.IndexScrutinyToolForTenantIT
[INFO] Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 28.679 s 
- in org.apache.phoenix.end2end.IndexToolForPartialBuildIT
[INFO] Tests run: 64, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 268.984 
s - in org.apache.phoenix.end2end.IndexExtendedIT
[INFO] Running 
org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
[INFO] Running org.apache.phoeni

Jenkins build is back to normal : Phoenix-4.x-HBase-1.5 #276

2020-02-18 Thread Apache Jenkins Server
See 




[phoenix] branch master updated (f592bfd -> 9cddb61)

2020-02-18 Thread skadam
This is an automated email from the ASF dual-hosted git repository.

skadam pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


from f592bfd  PHOENIX-5633: Add table name info to scan logging
 add 9cddb61  PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method 
updates index state to active

No new revisions were added by this update.

Summary of changes:
 .../apache/phoenix/end2end/IndexExtendedIT.java| 52 ++
 .../index/PhoenixIndexImportDirectReducer.java | 11 +++--
 2 files changed, 60 insertions(+), 3 deletions(-)



[phoenix] branch 4.x-HBase-1.4 updated: PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates index state to active

2020-02-18 Thread skadam
This is an automated email from the ASF dual-hosted git repository.

skadam pushed a commit to branch 4.x-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.4 by this push:
 new f48df18  PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method 
updates index state to active
f48df18 is described below

commit f48df1817763fc271f6fbd072e50c5bdcc72c06a
Author: Tanuj Khurana 
AuthorDate: Wed Feb 12 13:34:35 2020 -0800

PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates index 
state to active

Signed-off-by: s.kadam 
---
 .../apache/phoenix/end2end/IndexExtendedIT.java| 52 ++
 .../index/PhoenixIndexImportDirectReducer.java | 11 +++--
 2 files changed, 60 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
index fd830c2..bf78cef 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
@@ -36,6 +36,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.index.IndexTool;
 import org.apache.phoenix.query.BaseTest;
@@ -314,4 +315,55 @@ public class IndexExtendedIT extends BaseTest {
 conn.close();
 }
 }
+
+@Test
+public void testIndexStateOnException() throws Exception {
+if (localIndex  || useSnapshot || useViewIndex) {
+return;
+}
+
+String schemaName = generateUniqueName();
+String dataTableName = generateUniqueName();
+String dataTableFullName = SchemaUtil.getTableName(schemaName, 
dataTableName);
+String indexTableName = generateUniqueName();
+String indexFullName = SchemaUtil.getTableName(schemaName, 
indexTableName);
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+try (Connection conn = DriverManager.getConnection(getUrl(), props)){
+Statement stmt = conn.createStatement();
+stmt.execute(String.format(
+"CREATE TABLE %s (ID INTEGER NOT NULL PRIMARY KEY, NAME 
VARCHAR, ZIP INTEGER) %s",
+dataTableFullName, tableDDLOptions));
+
+stmt.execute(String.format(
+"UPSERT INTO %s VALUES(1, 'Phoenix', 12345)", 
dataTableFullName));
+
+conn.commit();
+
+// Configure IndexRegionObserver to fail the first write phase. 
This should not
+// lead to any change on index and thus index verify during index 
rebuild should fail
+IndexRegionObserver.setIgnoreIndexRebuildForTesting(true);
+stmt.execute(String.format(
+"CREATE INDEX %s ON %s (NAME) INCLUDE (ZIP) ASYNC",
+indexTableName, dataTableFullName));
+
+// Verify that the index table is not in the ACTIVE state
+assertFalse(checkIndexState(conn, indexFullName, 
PIndexState.ACTIVE, 0L));
+
+// Run the index MR job and verify that the index table rebuild 
fails
+IndexToolIT.runIndexTool(true, false, schemaName, dataTableName,
+indexTableName, null, -1, IndexTool.IndexVerifyType.AFTER);
+
+IndexRegionObserver.setIgnoreIndexRebuildForTesting(false);
+
+// job failed, verify that the index table is still not in the 
ACTIVE state
+assertFalse(checkIndexState(conn, indexFullName, 
PIndexState.ACTIVE, 0L));
+
+// Run the index MR job and verify that the index table rebuild 
succeeds
+IndexToolIT.runIndexTool(true, false, schemaName, dataTableName,
+indexTableName, null, 0, IndexTool.IndexVerifyType.AFTER);
+
+// job passed, verify that the index table is in the ACTIVE state
+assertTrue(checkIndexState(conn, indexFullName, 
PIndexState.ACTIVE, 0L));
+}
+}
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
index 46fb9ec..98000f7 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
@@ -111,16 +111,21 @@ public class PhoenixIndexImportDirectReducer extends
 if (verifyType != IndexTool.IndexVerifyType.NONE) {
 updateCounters(verifyType, context);
 }
+
+try {
+IndexToolUtil.updateIndexState(context.getConfigura

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates index state to active

2020-02-18 Thread skadam
This is an automated email from the ASF dual-hosted git repository.

skadam pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
 new 9e8f14d  PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates index state to active
9e8f14d is described below

commit 9e8f14da1157fad32679691c02e1d8164919842c
Author: Tanuj Khurana 
AuthorDate: Wed Feb 12 13:34:35 2020 -0800

PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates index 
state to active

Signed-off-by: s.kadam 
---
 .../apache/phoenix/end2end/IndexExtendedIT.java| 52 ++
 .../index/PhoenixIndexImportDirectReducer.java | 11 +++--
 2 files changed, 60 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
index fd830c2..bf78cef 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
@@ -36,6 +36,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.index.IndexTool;
 import org.apache.phoenix.query.BaseTest;
@@ -314,4 +315,55 @@ public class IndexExtendedIT extends BaseTest {
 conn.close();
 }
 }
+
+@Test
+public void testIndexStateOnException() throws Exception {
+if (localIndex  || useSnapshot || useViewIndex) {
+return;
+}
+
+String schemaName = generateUniqueName();
+String dataTableName = generateUniqueName();
+String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
+String indexTableName = generateUniqueName();
+String indexFullName = SchemaUtil.getTableName(schemaName, indexTableName);
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+try (Connection conn = DriverManager.getConnection(getUrl(), props)){
+Statement stmt = conn.createStatement();
+stmt.execute(String.format(
+"CREATE TABLE %s (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, ZIP INTEGER) %s",
+dataTableFullName, tableDDLOptions));
+
+stmt.execute(String.format(
+"UPSERT INTO %s VALUES(1, 'Phoenix', 12345)", dataTableFullName));
+
+conn.commit();
+
+// Configure IndexRegionObserver to fail the first write phase. This should not
+// lead to any change on index and thus index verify during index rebuild should fail
+IndexRegionObserver.setIgnoreIndexRebuildForTesting(true);
+stmt.execute(String.format(
+"CREATE INDEX %s ON %s (NAME) INCLUDE (ZIP) ASYNC",
+indexTableName, dataTableFullName));
+
+// Verify that the index table is not in the ACTIVE state
+assertFalse(checkIndexState(conn, indexFullName, PIndexState.ACTIVE, 0L));
+
+// Run the index MR job and verify that the index table rebuild fails
+IndexToolIT.runIndexTool(true, false, schemaName, dataTableName,
+indexTableName, null, -1, IndexTool.IndexVerifyType.AFTER);
+
+IndexRegionObserver.setIgnoreIndexRebuildForTesting(false);
+
+// job failed, verify that the index table is still not in the ACTIVE state
+assertFalse(checkIndexState(conn, indexFullName, PIndexState.ACTIVE, 0L));
+
+// Run the index MR job and verify that the index table rebuild succeeds
+IndexToolIT.runIndexTool(true, false, schemaName, dataTableName,
+indexTableName, null, 0, IndexTool.IndexVerifyType.AFTER);
+
+// job passed, verify that the index table is in the ACTIVE state
+assertTrue(checkIndexState(conn, indexFullName, PIndexState.ACTIVE, 0L));
+}
+}
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
index 46fb9ec..98000f7 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
@@ -111,16 +111,21 @@ public class PhoenixIndexImportDirectReducer extends
 if (verifyType != IndexTool.IndexVerifyType.NONE) {
 updateCounters(verifyType, context);
 }
+
+try {
+IndexToolUtil.updateIndexState(context.getConfigura

[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates index state to active

2020-02-18 Thread skadam
This is an automated email from the ASF dual-hosted git repository.

skadam pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
 new 1622ef9  PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates index state to active
1622ef9 is described below

commit 1622ef974ccd7d4ecd5d73058a6ff91a18b900c3
Author: Tanuj Khurana 
AuthorDate: Wed Feb 12 13:34:35 2020 -0800

PHOENIX-5723 PhoenixIndexImportDirectReducer cleanup method updates index 
state to active

Signed-off-by: s.kadam 
---
 .../apache/phoenix/end2end/IndexExtendedIT.java| 52 ++
 .../index/PhoenixIndexImportDirectReducer.java | 11 +++--
 2 files changed, 60 insertions(+), 3 deletions(-)

diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
index fd830c2..bf78cef 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
@@ -36,6 +36,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.mapreduce.index.IndexTool;
 import org.apache.phoenix.query.BaseTest;
@@ -314,4 +315,55 @@ public class IndexExtendedIT extends BaseTest {
 conn.close();
 }
 }
+
+@Test
+public void testIndexStateOnException() throws Exception {
+if (localIndex  || useSnapshot || useViewIndex) {
+return;
+}
+
+String schemaName = generateUniqueName();
+String dataTableName = generateUniqueName();
+String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
+String indexTableName = generateUniqueName();
+String indexFullName = SchemaUtil.getTableName(schemaName, indexTableName);
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+try (Connection conn = DriverManager.getConnection(getUrl(), props)){
+Statement stmt = conn.createStatement();
+stmt.execute(String.format(
+"CREATE TABLE %s (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR, ZIP INTEGER) %s",
+dataTableFullName, tableDDLOptions));
+
+stmt.execute(String.format(
+"UPSERT INTO %s VALUES(1, 'Phoenix', 12345)", dataTableFullName));
+
+conn.commit();
+
+// Configure IndexRegionObserver to fail the first write phase. This should not
+// lead to any change on index and thus index verify during index rebuild should fail
+IndexRegionObserver.setIgnoreIndexRebuildForTesting(true);
+stmt.execute(String.format(
+"CREATE INDEX %s ON %s (NAME) INCLUDE (ZIP) ASYNC",
+indexTableName, dataTableFullName));
+
+// Verify that the index table is not in the ACTIVE state
+assertFalse(checkIndexState(conn, indexFullName, PIndexState.ACTIVE, 0L));
+
+// Run the index MR job and verify that the index table rebuild fails
+IndexToolIT.runIndexTool(true, false, schemaName, dataTableName,
+indexTableName, null, -1, IndexTool.IndexVerifyType.AFTER);
+
+IndexRegionObserver.setIgnoreIndexRebuildForTesting(false);
+
+// job failed, verify that the index table is still not in the ACTIVE state
+assertFalse(checkIndexState(conn, indexFullName, PIndexState.ACTIVE, 0L));
+
+// Run the index MR job and verify that the index table rebuild succeeds
+IndexToolIT.runIndexTool(true, false, schemaName, dataTableName,
+indexTableName, null, 0, IndexTool.IndexVerifyType.AFTER);
+
+// job passed, verify that the index table is in the ACTIVE state
+assertTrue(checkIndexState(conn, indexFullName, PIndexState.ACTIVE, 0L));
+}
+}
 }
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
index 46fb9ec..98000f7 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
@@ -111,16 +111,21 @@ public class PhoenixIndexImportDirectReducer extends
 if (verifyType != IndexTool.IndexVerifyType.NONE) {
 updateCounters(verifyType, context);
 }
+
+try {
+IndexToolUtil.updateIndexState(context.getConfigura