See <https://builds.apache.org/job/Phoenix-4.0/362/changes>
Changes:

[jtaylor] Rename StatisticsUtils to follow conventions and other misc cleanup

[jtaylor] Atomically increment table timestamp and fix unit tests

[jtaylor] Change tests that set server-side config to NeedsOwnMiniClusterTest

------------------------------------------
[...truncated 1965 lines...]
	at java.lang.Thread.run(Thread.java:744)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:72)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.getTopNScanner(ScanRegionObserver.java:235)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.doPostScannerOpen(ScanRegionObserver.java:222)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:140)
	... 8 more
	at org.apache.phoenix.util.ServerUtil.parseServerException(ServerUtil.java:107)
	at org.apache.phoenix.iterate.TableResultIterator.<init>(TableResultIterator.java:57)
	at org.apache.phoenix.iterate.ParallelIterators$2.call(ParallelIterators.java:583)
	at org.apache.phoenix.iterate.ParallelIterators$2.call(ParallelIterators.java:578)
	at java.util.concurrent.FutureTask.run(FutureTask.java:262)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:744)
Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: org.apache.hadoop.hbase.DoNotRetryIOException: DESCCOLUMNSORTORDERTEST,,1412674387127.ff99f051f87c40a4fc0f259f5e01c561.: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:83)
	at org.apache.phoenix.util.ServerUtil.throwIOException(ServerUtil.java:51)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:158)
	at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1845)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3092)
	at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98)
	at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
	at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
	at java.lang.Thread.run(Thread.java:744)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:72)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.getTopNScanner(ScanRegionObserver.java:235)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.doPostScannerOpen(ScanRegionObserver.java:222)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:140)
	... 8 more
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
	at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:95)
	at org.apache.hadoop.hbase.protobuf.ProtobufUtil.getRemoteException(ProtobufUtil.java:285)
	at org.apache.hadoop.hbase.client.ScannerCallable.openScanner(ScannerCallable.java:316)
	at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:164)
	at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:59)
	at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:114)
	at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:90)
	at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:282)
	at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:187)
	at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:182)
	at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:109)
	at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:738)
	at org.apache.phoenix.iterate.TableResultIterator.<init>(TableResultIterator.java:54)
	at org.apache.phoenix.iterate.ParallelIterators$2.call(ParallelIterators.java:583)
	at org.apache.phoenix.iterate.ParallelIterators$2.call(ParallelIterators.java:578)
	at java.util.concurrent.FutureTask.run(FutureTask.java:262)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:744)
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.DoNotRetryIOException: DESCCOLUMNSORTORDERTEST,,1412674387127.ff99f051f87c40a4fc0f259f5e01c561.: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:83)
	at org.apache.phoenix.util.ServerUtil.throwIOException(ServerUtil.java:51)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:158)
	at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1845)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3092)
	at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98)
	at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
	at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
	at java.lang.Thread.run(Thread.java:744)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:72)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.getTopNScanner(ScanRegionObserver.java:235)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.doPostScannerOpen(ScanRegionObserver.java:222)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:140)
	... 8 more
	at org.apache.hadoop.hbase.ipc.RpcClient.call(RpcClient.java:1452)
	at org.apache.hadoop.hbase.ipc.RpcClient.callBlockingMethod(RpcClient.java:1656)
	at org.apache.hadoop.hbase.ipc.RpcClient$BlockingRpcChannelImplementation.callBlockingMethod(RpcClient.java:1714)
	at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$BlockingStub.scan(ClientProtos.java:29900)
	at org.apache.hadoop.hbase.client.ScannerCallable.openScanner(ScannerCallable.java:308)
	at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:164)
	at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:59)
	at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:114)
	at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:90)
	at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:282)
	at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:187)
	at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:182)
	at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:109)
	at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:738)
	at org.apache.phoenix.iterate.TableResultIterator.<init>(TableResultIterator.java:54)
	at org.apache.phoenix.iterate.ParallelIterators$2.call(ParallelIterators.java:583)
	at org.apache.phoenix.iterate.ParallelIterators$2.call(ParallelIterators.java:578)
	at java.util.concurrent.FutureTask.run(FutureTask.java:262)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:744)
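Note on the traces above: the repeated InsufficientMemoryException is the server-side coprocessor (ScanRegionObserver.getTopNScanner) trying to reserve 104000 bytes for a TopN scan while the region server's global memory pool is only 40000 bytes, so the allocation is refused outright and surfaces on the client as a DoNotRetryIOException. The snippet below is an illustrative sketch only, not Phoenix's actual GlobalMemoryManager; it just shows that failure mode under the assumption of a simple fixed-size pool that fails fast when a single request can never fit and otherwise waits for memory to be freed.

// Illustrative sketch only -- NOT Phoenix's GlobalMemoryManager.
// It demonstrates the failure mode reported above: a request larger than the
// whole pool is rejected immediately instead of waiting for memory to free up.
public final class BoundedMemoryPool {

    /** Thrown when a single request can never fit in the pool. */
    public static final class InsufficientMemoryException extends RuntimeException {
        InsufficientMemoryException(String message) {
            super(message);
        }
    }

    private final long maxBytes; // total pool size, e.g. 40000 in this run
    private long usedBytes;      // bytes currently handed out

    public BoundedMemoryPool(long maxBytes) {
        this.maxBytes = maxBytes;
    }

    /** Reserve {@code requested} bytes, failing fast if the request exceeds the whole pool. */
    public synchronized void allocate(long requested) throws InterruptedException {
        if (requested > maxBytes) {
            // Mirrors the message in the traces: 104000 bytes requested against a
            // 40000-byte pool, so no amount of waiting for other consumers would help.
            throw new InsufficientMemoryException("Requested memory of " + requested
                    + " bytes is larger than global pool of " + maxBytes + " bytes.");
        }
        while (usedBytes + requested > maxBytes) {
            wait(); // block until another consumer frees memory
        }
        usedBytes += requested;
    }

    /** Return {@code released} bytes to the pool and wake any waiting allocators. */
    public synchronized void free(long released) {
        usedBytes = Math.max(0, usedBytes - released);
        notifyAll();
    }

    public static void main(String[] args) throws InterruptedException {
        BoundedMemoryPool pool = new BoundedMemoryPool(40000);
        pool.allocate(104000); // throws, reproducing the message seen in the traces above
    }
}

A pool of only 40000 bytes is far below any realistic production setting, which suggests a test-scoped server configuration leaking into these runs; the third commit listed above, which moves tests that set server-side config to NeedsOwnMiniClusterTest, points in the same direction, though that is only an inference from this log.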
Running org.apache.phoenix.end2end.RegexpSubstrFunctionIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.804 sec - in org.apache.phoenix.end2end.ReverseScanIT
Running org.apache.phoenix.end2end.ServerExceptionIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.819 sec - in org.apache.phoenix.end2end.RegexpSubstrFunctionIT
Running org.apache.phoenix.end2end.AutoCommitIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.691 sec - in org.apache.phoenix.end2end.ServerExceptionIT
Running org.apache.phoenix.end2end.LastValueFunctionIT
Tests run: 25, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 84.139 sec - in org.apache.phoenix.end2end.InListIT
Running org.apache.phoenix.end2end.RoundFloorCeilFunctionsEnd2EndIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.698 sec - in org.apache.phoenix.end2end.AutoCommitIT
Running org.apache.phoenix.end2end.LpadFunctionIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.43 sec - in org.apache.phoenix.end2end.LastValueFunctionIT
Tests run: 30, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.98 sec - in org.apache.phoenix.end2end.RoundFloorCeilFunctionsEnd2EndIT
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.039 sec - in org.apache.phoenix.end2end.LpadFunctionIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 32.367 sec - in org.apache.phoenix.end2end.QueryMoreIT
Tests run: 96, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 86.055 sec - in org.apache.phoenix.end2end.HashJoinIT

Results :

Tests in error: 
  SubqueryIT.testInSubquery:739 » SQL Encountered exception in sub plan [1] exec...
  SubqueryIT.testExistsSubquery:787 » SQL Encountered exception in sub plan [1] ...
  SubqueryIT.testInSubquery:739 » SQL Encountered exception in sub plan [0] exec...
  SubqueryIT.testExistsSubquery:787 » SQL Encountered exception in sub plan [1] ...
  SubqueryIT.testInSubquery:739 » SQL Encountered exception in sub plan [1] exec...
  SubqueryIT.testExistsSubquery:787 » SQL Encountered exception in sub plan [1] ...
  SortOrderFIT.queryDescDateWithExplicitOrderBy:251->runQueryTest:353->runQuery:396 » PhoenixIO

Tests run: 508, Failures: 0, Errors: 7, Skipped: 1

[INFO] 
[INFO] --- maven-failsafe-plugin:2.17:integration-test (NeedTheirOwnClusterTests) @ phoenix-core ---
[INFO] Failsafe report directory: <https://builds.apache.org/job/Phoenix-4.0/ws/phoenix-core/target/failsafe-reports>
[INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true

-------------------------------------------------------
 T E S T S
-------------------------------------------------------
-------------------------------------------------------
 T E S T S
-------------------------------------------------------
Running org.apache.phoenix.hbase.index.balancer.IndexLoadBalancerIT
Running org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Running org.apache.phoenix.hbase.index.covered.EndToEndCoveredColumnsIndexBuilderIT
Running org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Running org.apache.phoenix.hbase.index.covered.example.FailWithoutRetriesIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.877 sec - in org.apache.phoenix.hbase.index.covered.EndToEndCoveredColumnsIndexBuilderIT
Running org.apache.phoenix.hbase.index.FailForUnsupportedHBaseVersionsIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.933 sec - in org.apache.phoenix.hbase.index.covered.example.FailWithoutRetriesIT
Running org.apache.phoenix.end2end.KeyOnlyIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 15.545 sec - in org.apache.phoenix.hbase.index.FailForUnsupportedHBaseVersionsIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.336 sec - in org.apache.phoenix.end2end.KeyOnlyIT
Running org.apache.phoenix.end2end.ParallelIteratorsIT
Running org.apache.phoenix.end2end.TenantSpecificTablesDDLIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 47.561 sec - in org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 47.995 sec - in org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Running org.apache.phoenix.end2end.index.MutableIndexFailureIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.403 sec - in org.apache.phoenix.end2end.ParallelIteratorsIT
Running org.apache.phoenix.end2end.index.DropIndexDuringUpsertIT
Running org.apache.phoenix.end2end.index.MutableIndexReplicationIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 25.014 sec - in org.apache.phoenix.end2end.TenantSpecificTablesDDLIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.165 sec - in org.apache.phoenix.end2end.index.MutableIndexReplicationIT
Running org.apache.phoenix.end2end.ContextClassloaderIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.722 sec - in org.apache.phoenix.end2end.ContextClassloaderIT
Tests run: 1, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 21.136 sec <<< FAILURE! - in org.apache.phoenix.end2end.StatsCollectorIT
org.apache.phoenix.end2end.StatsCollectorIT Time elapsed: 21.136 sec <<< FAILURE!
java.lang.AssertionError: null
	at org.junit.Assert.fail(Assert.java:86)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.junit.Assert.assertTrue(Assert.java:52)
	at org.apache.phoenix.query.BaseTest.initAndRegisterDriver(BaseTest.java:646)
	at org.apache.phoenix.query.BaseTest.setUpTestDriver(BaseTest.java:525)
	at org.apache.phoenix.end2end.StatsCollectorIT.doSetup(StatsCollectorIT.java:33)

Running org.apache.phoenix.end2end.TenantSpecificTablesDMLIT
Tests run: 1, Failures: 1, Errors: 0, Skipped: 0, Time elapsed: 36.353 sec <<< FAILURE! - in org.apache.phoenix.end2end.MultiCfQueryExecIT
org.apache.phoenix.end2end.MultiCfQueryExecIT Time elapsed: 36.352 sec <<< FAILURE!
java.lang.AssertionError: null
	at org.junit.Assert.fail(Assert.java:86)
	at org.junit.Assert.assertTrue(Assert.java:41)
	at org.junit.Assert.assertTrue(Assert.java:52)
	at org.apache.phoenix.query.BaseTest.initAndRegisterDriver(BaseTest.java:646)
	at org.apache.phoenix.query.BaseTest.setUpTestDriver(BaseTest.java:525)
	at org.apache.phoenix.end2end.MultiCfQueryExecIT.doSetup(MultiCfQueryExecIT.java:56)

Tests run: 9, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 100.662 sec - in org.apache.phoenix.hbase.index.balancer.IndexLoadBalancerIT
Tests run: 14, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.39 sec - in org.apache.phoenix.end2end.TenantSpecificTablesDMLIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 65.79 sec - in org.apache.phoenix.end2end.index.DropIndexDuringUpsertIT
Running org.apache.phoenix.mapreduce.CsvBulkLoadToolIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 135.916 sec - in org.apache.phoenix.end2end.index.MutableIndexFailureIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 167.694 sec - in org.apache.phoenix.mapreduce.CsvBulkLoadToolIT

Results :

Failed tests: 
  StatsCollectorIT.doSetup:33->BaseTest.setUpTestDriver:525->BaseTest.initAndRegisterDriver:646 null
  MultiCfQueryExecIT.doSetup:56->BaseTest.setUpTestDriver:525->BaseTest.initAndRegisterDriver:646 null

Tests run: 86, Failures: 2, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.17:verify (ClientManagedTimeTests) @ phoenix-core ---
[INFO] Failsafe report directory: <https://builds.apache.org/job/Phoenix-4.0/ws/phoenix-core/target/failsafe-reports>
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Phoenix .................................... SUCCESS [2.297s]
[INFO] Phoenix Hadoop Compatibility ...................... SUCCESS [2.769s]
[INFO] Phoenix Hadoop2 Compatibility ..................... SUCCESS [2.755s]
[INFO] Phoenix Core ...................................... FAILURE [12:41.670s]
[INFO] Phoenix - Flume ................................... SKIPPED
[INFO] Phoenix - Pig ..................................... SKIPPED
[INFO] Phoenix Assembly .................................. SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 12:49.977s
[INFO] Finished at: Tue Oct 07 09:38:46 UTC 2014
[INFO] Final Memory: 57M/1869M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-failsafe-plugin:2.17:verify (ClientManagedTimeTests) on project phoenix-core: There are test failures.
[ERROR] 
[ERROR] Please refer to <https://builds.apache.org/job/Phoenix-4.0/ws/phoenix-core/target/failsafe-reports> for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :phoenix-core
Build step 'Invoke top-level Maven targets' marked build as failure
Archiving artifacts
Sending artifact delta relative to Phoenix | 4.0 #359
Archived 711 artifacts
Archive block size is 32768
Received 5133 blocks and 391922854 bytes
Compression is 30.0%
Took 2 min 37 sec
Recording test results
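A note for anyone reproducing this locally: the resume command printed above (mvn <goals> -rf :phoenix-core) restarts the reactor at the failing phoenix-core module. Assuming the Failsafe it.test property is honored by this build, a single reported class such as SortOrderFIT can also be targeted directly with something like

  mvn -pl phoenix-core verify -Dit.test=SortOrderFIT

where the class name is just one of those reported in the results above; substitute whichever failing test is of interest.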