See <https://builds.apache.org/job/Phoenix-4.8-HBase-1.1/29/changes>

Changes:

[tdsilva] PHOENIX-3307 Backward compatibility fails for tables with index (4.7.0

------------------------------------------
[...truncated 1225 lines...]
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException: Failed 2 actions: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:400)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.doBatchOp(RSRpcServices.java:700)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.doNonAtomicRegionMutation(RSRpcServices.java:662)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.multi(RSRpcServices.java:2046)
        at org.apache.hadoop.hbase.client.MultiServerCallable.call(MultiServerCallable.java:129)
        at org.apache.hadoop.hbase.client.MultiServerCallable.call(MultiServerCallable.java:54)
        at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:200)
        at org.apache.hadoop.hbase.client.AsyncProcess$AsyncRequestFutureImpl$SingleServerRequestRunnable.run(AsyncProcess.java:708)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)
: 2 times, 
        at org.apache.hadoop.hbase.client.AsyncProcess$BatchErrors.makeException(AsyncProcess.java:228)
        at org.apache.hadoop.hbase.client.AsyncProcess$BatchErrors.access$1700(AsyncProcess.java:208)
        at org.apache.hadoop.hbase.client.AsyncProcess$AsyncRequestFutureImpl.getErrors(AsyncProcess.java:1599)
        at org.apache.hadoop.hbase.client.HTable.batch(HTable.java:936)
        at org.apache.hadoop.hbase.client.HTable.batch(HTable.java:950)
        at org.apache.hadoop.hbase.client.HTableWrapper.batch(HTableWrapper.java:255)
        at org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter$1.call(ParallelWriterIndexCommitter.java:171)
        at org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter$1.call(ParallelWriterIndexCommitter.java:136)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        ... 1 more
: 1 time, 
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.helpTestWriteFailureDisablesIndex(MutableIndexFailureIT.java:225)
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex(MutableIndexFailureIT.java:127)

testWriteFailureDisablesIndex[transactional = false, localIndex = true, isNamespaceMapped = false](org.apache.phoenix.end2end.index.MutableIndexFailureIT)  Time elapsed: 38.109 sec  <<< ERROR!
org.apache.phoenix.exception.PhoenixIOException: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:400)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: java.util.concurrent.ExecutionException: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:400)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:400)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:400)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:400)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)

Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.DoNotRetryIOException
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT$FailingRegionObserver.preBatchMutate(MutableIndexFailureIT.java:400)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$35.call(RegionCoprocessorHost.java:1024)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1708)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1783)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1740)
        at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preBatchMutate(RegionCoprocessorHost.java:1020)
        at org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:3078)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2853)
        at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2795)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.commitBatch(UngroupedAggregateRegionObserver.java:205)
        at org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver.doPostScannerOpen(UngroupedAggregateRegionObserver.java:623)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.overrideDelegate(BaseScannerRegionObserver.java:214)
        at org.apache.phoenix.coprocessor.BaseScannerRegionObserver$RegionScannerHolder.nextRaw(BaseScannerRegionObserver.java:259)
        at org.apache.hadoop.hbase.regionserver.RSRpcServices.scan(RSRpcServices.java:2420)
        at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:32385)
        at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2117)
        at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:104)
        at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:133)
        at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:108)
        at java.lang.Thread.run(Thread.java:745)


testWriteFailureDisablesIndex[transactional = true, localIndex = false, isNamespaceMapped = true](org.apache.phoenix.end2end.index.MutableIndexFailureIT)  Time elapsed: 39.444 sec  <<< ERROR!
org.apache.phoenix.schema.TableAlreadyExistsException: ERROR 1013 (42M04): Table already exists. tableName=TEST.IDX_2
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.helpTestWriteFailureDisablesIndex(MutableIndexFailureIT.java:159)
        at org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex(MutableIndexFailureIT.java:127)

Running org.apache.phoenix.monitoring.PhoenixMetricsIT
Running org.apache.phoenix.rpc.PhoenixClientRpcIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 22.422 sec - in org.apache.phoenix.rpc.PhoenixClientRpcIT
Running org.apache.phoenix.rpc.PhoenixServerRpcIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 184.767 sec - in org.apache.phoenix.iterate.ScannerLeaseRenewalIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 31.121 sec - in org.apache.phoenix.rpc.PhoenixServerRpcIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 195.23 sec - in org.apache.phoenix.monitoring.PhoenixMetricsIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 389.692 sec - in org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 391.232 sec - in org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT

Results :

Tests in error: 
  RenewLeaseIT.doSetup:57->BaseTest.setUpTestDriver:630->BaseTest.initAndRegisterTestDriver:770 » PhoenixIO
org.apache.phoenix.end2end.index.MutableIndexFailureIT.testWriteFailureDisablesIndex[transactional = false, localIndex = true, isNamespaceMapped = false](org.apache.phoenix.end2end.index.MutableIndexFailureIT)
  Run 1: MutableIndexFailureIT.testWriteFailureDisablesIndex:127->helpTestWriteFailureDisablesIndex:225 » Commit
  Run 2: MutableIndexFailureIT>BaseOwnClusterHBaseManagedTimeIT.cleanUpAfterTest:27->BaseTest.deletePriorMetaData:932->BaseTest.deletePriorTables:940->BaseTest.deletePriorTables:951->BaseTest.deletePriorTables:996 » PhoenixIO

  MutableIndexFailureIT.testWriteFailureDisablesIndex:127->helpTestWriteFailureDisablesIndex:159 » TableAlreadyExists
  MutableIndexReplicationIT.setUpBeforeClass:108->setupConfigsAndStartCluster:171 » IO

Tests run: 344, Failures: 0, Errors: 4, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:verify (ClientManagedTimeTests) @ phoenix-core ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Phoenix ..................................... SUCCESS [ 36.573 s]
[INFO] Phoenix Core ....................................... FAILURE [  02:30 h]
[INFO] Phoenix - Flume .................................... SKIPPED
[INFO] Phoenix - Pig ...................................... SKIPPED
[INFO] Phoenix Query Server Client ........................ SKIPPED
[INFO] Phoenix Query Server ............................... SKIPPED
[INFO] Phoenix - Pherf .................................... SKIPPED
[INFO] Phoenix - Spark .................................... SKIPPED
[INFO] Phoenix - Hive ..................................... SKIPPED
[INFO] Phoenix Client ..................................... SKIPPED
[INFO] Phoenix Server ..................................... SKIPPED
[INFO] Phoenix Assembly ................................... SKIPPED
[INFO] Phoenix - Tracing Web Application .................. SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 02:31 h
[INFO] Finished at: 2016-09-23T00:22:09+00:00
[INFO] Final Memory: 59M/1014M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-failsafe-plugin:2.19.1:verify (ClientManagedTimeTests) on project phoenix-core: There was a timeout or other error in the fork -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoExecutionException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :phoenix-core
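
Note: beyond resuming the full build as above, a quicker way to reproduce just the failing test locally is to run only that integration test class through Failsafe. This is a sketch assuming the stock maven-failsafe-plugin setup in phoenix-core; the profiles and extra flags this Jenkins job activates may differ:

  # run only MutableIndexFailureIT in the phoenix-core module (-Dit.test is the
  # standard Failsafe property for selecting integration test classes)
  mvn verify -pl phoenix-core -Dit.test=MutableIndexFailureIT -DfailIfNoTests=false
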
Build step 'Invoke top-level Maven targets' marked build as failure
Archiving artifacts
Compressed 1.55 GB of artifacts by 57.7% relative to #27
Recording test results