See <https://builds.apache.org/job/Phoenix-master/1403/changes>

Changes:

[jamestaylor] PHOENIX-3280 Automatic attempt to rebuild all disabled index

------------------------------------------
[...truncated 1080 lines...]
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33656)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2170)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:109)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)
: 2 times, 
        at org.apache.hadoop.hbase.client.AsyncProcess$BatchErrors.makeException(AsyncProcess.java:247)
        at org.apache.hadoop.hbase.client.AsyncProcess$BatchErrors.access$1800(AsyncProcess.java:227)
        at org.apache.hadoop.hbase.client.AsyncProcess$AsyncRequestFutureImpl.getErrors(AsyncProcess.java:1663)
        at org.apache.hadoop.hbase.client.HTable.batch(HTable.java:917)
        at org.apache.hadoop.hbase.client.HTable.batch(HTable.java:931)
        at org.apache.hadoop.hbase.client.HTableWrapper.batch(HTableWrapper.java:255)
        at org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter$1.call(ParallelWriterIndexCommitter.java:173)
        at org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter$1.call(ParallelWriterIndexCommitter.java:136)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        ... 1 more
: 1 time, 
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.helpTestWriteFailureDisablesIndex(MutableIndexFailureIT.java:225)
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex(MutableIndexFailureIT.java:127)

testWriteFailureDisablesIndex[transactional = false, localIndex = false, isNamespaceMapped = true](org.apache.phoenix.end2end.index.MutableIndexFailureIT)  Time elapsed: 19.079 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:399)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1007)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1673)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1749)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1705)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1003)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3080)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2867)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2809)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2555)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33648)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2170)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:109)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: java.util.concurrent.ExecutionException: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:399)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1007)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1673)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1749)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1705)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1003)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3080)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2867)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2809)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2555)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33648)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2170)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:109)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:399)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1007)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1673)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1749)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1705)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1003)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3080)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2867)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2809)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2555)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33648)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2170)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:109)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:399)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1007)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1673)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1749)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1705)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1003)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3080)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2867)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2809)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2555)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33648)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2170)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:109)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:399)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1007)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1673)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1749)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1705)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1003)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3080)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2867)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2809)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2555)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33648)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2170)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:109)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:399)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1007)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1673)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1749)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1705)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1003)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3080)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2867)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2809)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2555)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:33648)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2170)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:109)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testWriteFailureDisablesIndex[transactional = false, localIndex = false, isNamespaceMapped = false](org.apache.phoenix.end2end.index.MutableIndexFailureIT)  Time elapsed: 16.761 sec  <<< ERROR!
org.apache.phoenix.schema.TableAlreadyExistsException: ERROR 1013 (42M04): Table already exists. tableName=TEST.IDX_2
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.helpTestWriteFailureDisablesIndex(MutableIndexFailureIT.java:159)
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex(MutableIndexFailureIT.java:127)

testWriteFailureDisablesIndex[transactional = true, localIndex = false, isNamespaceMapped = true](org.apache.phoenix.end2end.index.MutableIndexFailureIT)  Time elapsed: 27.584 sec  <<< FAILURE!
java.lang.AssertionError
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.helpTestWriteFailureDisablesIndex(MutableIndexFailureIT.java:164)
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex(MutableIndexFailureIT.java:127)

Running org.apache.phoenix.execute.PartialCommitIT
Tests run: 12, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.357 sec - in org.apache.phoenix.execute.PartialCommitIT
Running org.apache.phoenix.hbase.index.covered.EndToEndCoveredColumnsIndexBuilderIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 27.143 sec - in org.apache.phoenix.hbase.index.covered.EndToEndCoveredColumnsIndexBuilderIT
Running org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Running org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Running org.apache.phoenix.hbase.index.covered.example.FailWithoutRetriesIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.245 sec - in org.apache.phoenix.hbase.index.covered.example.FailWithoutRetriesIT
Tests run: 16, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1,079.018 sec - in org.apache.phoenix.end2end.IndexToolIT
Running org.apache.phoenix.iterate.RoundRobinResultIteratorWithStatsIT
Running org.apache.phoenix.iterate.ScannerLeaseRenewalIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.287 sec - in org.apache.phoenix.iterate.RoundRobinResultIteratorWithStatsIT
Running org.apache.phoenix.monitoring.PhoenixMetricsIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 192.009 sec - in org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 196.948 sec - in org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 94.6 sec - in org.apache.phoenix.monitoring.PhoenixMetricsIT
Running org.apache.phoenix.rpc.PhoenixClientRpcIT
Running org.apache.phoenix.rpc.PhoenixServerRpcIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 175.906 sec - in org.apache.phoenix.iterate.ScannerLeaseRenewalIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.4 sec - in org.apache.phoenix.rpc.PhoenixClientRpcIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 17.1 sec - in org.apache.phoenix.rpc.PhoenixServerRpcIT
Tests run: 136, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1,155.616 sec - in org.apache.phoenix.end2end.index.IndexIT

Results :

Failed tests: 
  MutableIndexFailureIT.testWriteFailureDisablesIndex:127->helpTestWriteFailureDisablesIndex:164
Tests in error: 
  MutableIndexFailureIT.testWriteFailureDisablesIndex:127->helpTestWriteFailureDisablesIndex:159 » TableAlreadyExists
org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex[transactional = false, localIndex = false, isNamespaceMapped = true](org.apache.phoenix.end2end.index.MutableIndexFailureIT)
  Run 1: MutableIndexFailureIT.testWriteFailureDisablesIndex:127->helpTestWriteFailureDisablesIndex:225 » Commit
  Run 2: MutableIndexFailureIT>BaseOwnClusterHBaseManagedTimeIT.cleanUpAfterTest:27->BaseTest.deletePriorMetaData:930->BaseTest.deletePriorTables:938->BaseTest.deletePriorTables:949->BaseTest.deletePriorTables:994 » PhoenixIO


Tests run: 480, Failures: 1, Errors: 2, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:verify (ClientManagedTimeTests) @ phoenix-core ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Phoenix ..................................... SUCCESS [  3.341 s]
[INFO] Phoenix Core ....................................... FAILURE [  01:33 h]
[INFO] Phoenix - Flume .................................... SKIPPED
[INFO] Phoenix - Pig ...................................... SKIPPED
[INFO] Phoenix Query Server Client ........................ SKIPPED
[INFO] Phoenix Query Server ............................... SKIPPED
[INFO] Phoenix - Pherf .................................... SKIPPED
[INFO] Phoenix - Spark .................................... SKIPPED
[INFO] Phoenix - Hive ..................................... SKIPPED
[INFO] Phoenix Client ..................................... SKIPPED
[INFO] Phoenix Server ..................................... SKIPPED
[INFO] Phoenix Assembly ................................... SKIPPED
[INFO] Phoenix - Tracing Web Application .................. SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 01:33 h
[INFO] Finished at: 2016-09-15T10:32:36+00:00
[INFO] Final Memory: 60M/1081M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-failsafe-plugin:2.19.1:verify (ClientManagedTimeTests) on project phoenix-core: There was a timeout or other error in the fork -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoExecutionException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :phoenix-core
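
For anyone picking this up locally, a minimal sketch of how to resume the reactor and narrow the run to the failing suite, assuming a checkout of master and the stock failsafe configuration (the it.test selector is the standard maven-failsafe property, not something shown in this log):

    # resume the full build from the failing module, as the log suggests
    mvn verify -rf :phoenix-core

    # or build only phoenix-core and run just the failing IT with full stack traces
    mvn -pl phoenix-core verify -Dit.test=MutableIndexFailureIT -e

The second command is the quicker way to reproduce the MutableIndexFailureIT failures above without waiting for the whole suite.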
Build step 'Invoke top-level Maven targets' marked build as failure
Archiving artifacts
Compressed 1.98 GB of artifacts by 61.9% relative to #1371
Updating PHOENIX-3280
Recording test results
