See <https://builds.apache.org/job/Phoenix-master-hadoop1/382/changes>
Changes:

[ramkrishna] Phoenix-1275 SYSTEM.STATS table is not created when SYSTEM.CATALOG is

------------------------------------------
[...truncated 807 lines...]
	at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98)
	at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
	at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
	at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:72)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.getTopNScanner(ScanRegionObserver.java:234)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.doPostScannerOpen(ScanRegionObserver.java:221)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:134)
	... 8 more
	at java.util.concurrent.FutureTask$Sync.innerGet(FutureTask.java:262)
	at java.util.concurrent.FutureTask.get(FutureTask.java:119)
	at org.apache.phoenix.iterate.ParallelIterators.getIterators(ParallelIterators.java:280)
	at org.apache.phoenix.iterate.MergeSortResultIterator.getIterators(MergeSortResultIterator.java:48)
	at org.apache.phoenix.iterate.MergeSortResultIterator.minIterator(MergeSortResultIterator.java:63)
	at org.apache.phoenix.iterate.MergeSortResultIterator.next(MergeSortResultIterator.java:90)
	at org.apache.phoenix.iterate.MergeSortTopNResultIterator.next(MergeSortTopNResultIterator.java:87)
	at org.apache.phoenix.jdbc.PhoenixResultSet.next(PhoenixResultSet.java:732)
	at org.apache.phoenix.end2end.SortOrderFIT.runQuery(SortOrderFIT.java:396)
	at org.apache.phoenix.end2end.SortOrderFIT.runQueryTest(SortOrderFIT.java:353)
	at org.apache.phoenix.end2end.SortOrderFIT.queryDescDateWithExplicitOrderBy(SortOrderFIT.java:251)
Caused by: org.apache.phoenix.exception.PhoenixIOException: org.apache.hadoop.hbase.DoNotRetryIOException: DESCCOLUMNSORTORDERTEST,,1411642454947.35d84ba17616bad89a1ed754e96608e6.: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:77)
	at org.apache.phoenix.util.ServerUtil.throwIOException(ServerUtil.java:45)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:152)
	at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1845)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3092)
	at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98)
	at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
	at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
	at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:72)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.getTopNScanner(ScanRegionObserver.java:234)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.doPostScannerOpen(ScanRegionObserver.java:221)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:134)
	... 8 more
	at org.apache.phoenix.util.ServerUtil.parseServerException(ServerUtil.java:101)
	at org.apache.phoenix.iterate.TableResultIterator.<init>(TableResultIterator.java:57)
	at org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:362)
	at org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:357)
	at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
	at java.util.concurrent.FutureTask.run(FutureTask.java:166)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: org.apache.hadoop.hbase.DoNotRetryIOException: DESCCOLUMNSORTORDERTEST,,1411642454947.35d84ba17616bad89a1ed754e96608e6.: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:77)
	at org.apache.phoenix.util.ServerUtil.throwIOException(ServerUtil.java:45)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:152)
	at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1845)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3092)
	at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98)
	at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
	at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
	at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:72)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.getTopNScanner(ScanRegionObserver.java:234)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.doPostScannerOpen(ScanRegionObserver.java:221)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:134)
	... 8 more
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
	at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:95)
	at org.apache.hadoop.hbase.protobuf.ProtobufUtil.getRemoteException(ProtobufUtil.java:285)
	at org.apache.hadoop.hbase.client.ScannerCallable.openScanner(ScannerCallable.java:316)
	at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:164)
	at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:59)
	at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:114)
	at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:90)
	at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:282)
	at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:187)
	at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:182)
	at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:109)
	at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:738)
	at org.apache.phoenix.iterate.TableResultIterator.<init>(TableResultIterator.java:54)
	at org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:362)
	at org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:357)
	at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
	at java.util.concurrent.FutureTask.run(FutureTask.java:166)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.DoNotRetryIOException: DESCCOLUMNSORTORDERTEST,,1411642454947.35d84ba17616bad89a1ed754e96608e6.: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.util.ServerUtil.createIOException(ServerUtil.java:77)
	at org.apache.phoenix.util.ServerUtil.throwIOException(ServerUtil.java:45)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:152)
	at org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postScannerOpen(RegionCoprocessorHost.java:1845)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.scan(HRegionServer.java:3092)
	at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:29497)
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2027)
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:98)
	at org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
	at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
	at java.lang.Thread.run(Thread.java:724)
Caused by: org.apache.phoenix.memory.InsufficientMemoryException: Requested memory of 104000 bytes is larger than global pool of 40000 bytes.
	at org.apache.phoenix.memory.GlobalMemoryManager.allocateBytes(GlobalMemoryManager.java:72)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:100)
	at org.apache.phoenix.memory.GlobalMemoryManager.allocate(GlobalMemoryManager.java:106)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.getTopNScanner(ScanRegionObserver.java:234)
	at org.apache.phoenix.coprocessor.ScanRegionObserver.doPostScannerOpen(ScanRegionObserver.java:221)
	at org.apache.phoenix.coprocessor.BaseScannerRegionObserver.postScannerOpen(BaseScannerRegionObserver.java:134)
	... 8 more
	at org.apache.hadoop.hbase.ipc.RpcClient.call(RpcClient.java:1452)
	at org.apache.hadoop.hbase.ipc.RpcClient.callBlockingMethod(RpcClient.java:1656)
	at org.apache.hadoop.hbase.ipc.RpcClient$BlockingRpcChannelImplementation.callBlockingMethod(RpcClient.java:1714)
	at org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$BlockingStub.scan(ClientProtos.java:29900)
	at org.apache.hadoop.hbase.client.ScannerCallable.openScanner(ScannerCallable.java:308)
	at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:164)
	at org.apache.hadoop.hbase.client.ScannerCallable.call(ScannerCallable.java:59)
	at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:114)
	at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithRetries(RpcRetryingCaller.java:90)
	at org.apache.hadoop.hbase.client.ClientScanner.nextScanner(ClientScanner.java:282)
	at org.apache.hadoop.hbase.client.ClientScanner.initializeScannerInConstruction(ClientScanner.java:187)
	at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:182)
	at org.apache.hadoop.hbase.client.ClientScanner.<init>(ClientScanner.java:109)
	at org.apache.hadoop.hbase.client.HTable.getScanner(HTable.java:738)
	at org.apache.phoenix.iterate.TableResultIterator.<init>(TableResultIterator.java:54)
	at org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:362)
	at org.apache.phoenix.iterate.ParallelIterators$3.call(ParallelIterators.java:357)
	at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
	at java.util.concurrent.FutureTask.run(FutureTask.java:166)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:724)
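Both failing tests in this run bottom out in the same server-side allocation: ScanRegionObserver.getTopNScanner asks the GlobalMemoryManager for 104000 bytes to buffer a top-N result, the global pool is capped at 40000 bytes, and allocateBytes fails fast; the coprocessor wraps that in a DoNotRetryIOException and the client surfaces it as a PhoenixIOException. A minimal sketch of the guard implied by the trace follows; only the class, method, and exception names and the message text come from the log, while the fields, locking, and everything else are assumptions, not the actual Phoenix source:

    // Hypothetical sketch of the fail-fast size check seen at
    // GlobalMemoryManager.allocateBytes (GlobalMemoryManager.java:72 above).
    public class GlobalMemoryManagerSketch {
        static class InsufficientMemoryException extends RuntimeException {
            InsufficientMemoryException(String msg) { super(msg); }
        }

        private final long maxMemoryBytes; // global pool size; 40000 in this run
        private long usedBytes;            // bytes currently handed out

        GlobalMemoryManagerSketch(long maxMemoryBytes) {
            this.maxMemoryBytes = maxMemoryBytes;
        }

        synchronized long allocateBytes(long reqBytes) {
            // A single request larger than the whole pool can never succeed,
            // no matter how long we wait for other allocations to be freed,
            // so throw immediately -- this is the message seen in the trace.
            if (reqBytes > maxMemoryBytes) {
                throw new InsufficientMemoryException("Requested memory of " + reqBytes
                        + " bytes is larger than global pool of " + maxMemoryBytes + " bytes.");
            }
            // The real manager presumably also waits when the pool is merely
            // full; that path is omitted from this sketch.
            usedBytes += reqBytes;
            return reqBytes;
        }
    }

A 40000-byte pool is far below any plausible production size, which points at a deliberately tiny test-time limit (and a test picking it up unexpectedly) rather than a genuine server misconfiguration.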
Running org.apache.phoenix.end2end.RegexpSubstrFunctionIT
Tests run: 5, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.355 sec - in org.apache.phoenix.end2end.salted.SaltedTableUpsertSelectIT
Running org.apache.phoenix.end2end.ServerExceptionIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.164 sec - in org.apache.phoenix.end2end.ReverseScanIT
Running org.apache.phoenix.end2end.AutoCommitIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.661 sec - in org.apache.phoenix.end2end.RegexpSubstrFunctionIT
Running org.apache.phoenix.end2end.LastValueFunctionIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.321 sec - in org.apache.phoenix.end2end.ServerExceptionIT
Running org.apache.phoenix.end2end.RoundFloorCeilFunctionsEnd2EndIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.053 sec - in org.apache.phoenix.end2end.AutoCommitIT
Running org.apache.phoenix.end2end.LpadFunctionIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.027 sec - in org.apache.phoenix.end2end.LastValueFunctionIT
Tests run: 30, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.163 sec - in org.apache.phoenix.end2end.RoundFloorCeilFunctionsEnd2EndIT
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 6.822 sec - in org.apache.phoenix.end2end.LpadFunctionIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 32.081 sec - in org.apache.phoenix.end2end.QueryMoreIT
Tests run: 96, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 92.251 sec - in org.apache.phoenix.end2end.HashJoinIT

Results :

Tests in error: 
  LocalIndexIT.testLocalIndexScanJoinColumnsFromDataTable:439 ? PhoenixIO org.ap...
  SortOrderFIT.queryDescDateWithExplicitOrderBy:251->runQueryTest:353->runQuery:396 ? PhoenixIO

Tests run: 490, Failures: 0, Errors: 2, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.17:integration-test (NeedTheirOwnClusterTests) @ phoenix-core ---
[INFO] Failsafe report directory: <https://builds.apache.org/job/Phoenix-master-hadoop1/ws/phoenix-core/target/failsafe-reports>
[INFO] parallel='none', perCoreThreadCount=true, threadCount=0, useUnlimitedThreads=false, threadCountSuites=0, threadCountClasses=0, threadCountMethods=0, parallelOptimized=true

-------------------------------------------------------
 T E S T S
-------------------------------------------------------
Running org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Running org.apache.phoenix.hbase.index.covered.EndToEndCoveredColumnsIndexBuilderIT
Running org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Running org.apache.phoenix.hbase.index.covered.example.FailWithoutRetriesIT
Running org.apache.phoenix.hbase.index.balancer.IndexLoadBalancerIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.062 sec - in org.apache.phoenix.hbase.index.covered.EndToEndCoveredColumnsIndexBuilderIT
Running org.apache.phoenix.hbase.index.FailForUnsupportedHBaseVersionsIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.838 sec - in org.apache.phoenix.hbase.index.covered.example.FailWithoutRetriesIT
Running org.apache.phoenix.end2end.index.MutableIndexFailureIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 15.336 sec - in org.apache.phoenix.hbase.index.FailForUnsupportedHBaseVersionsIT
Running org.apache.phoenix.end2end.ContextClassloaderIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 48.399 sec - in org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 48.203 sec - in org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.1 sec - in org.apache.phoenix.end2end.ContextClassloaderIT
Running org.apache.phoenix.mapreduce.CsvBulkLoadToolIT
Tests run: 9, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 102.251 sec - in org.apache.phoenix.hbase.index.balancer.IndexLoadBalancerIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 152.354 sec - in org.apache.phoenix.end2end.index.MutableIndexFailureIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 163.874 sec - in org.apache.phoenix.mapreduce.CsvBulkLoadToolIT

Results :

Tests run: 46, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.17:verify (ClientManagedTimeTests) @ phoenix-core ---
[INFO] Failsafe report directory: <https://builds.apache.org/job/Phoenix-master-hadoop1/ws/phoenix-core/target/failsafe-reports>
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Phoenix .................................... SUCCESS [2.757s]
[INFO] Phoenix Hadoop Compatibility ...................... SUCCESS [3.903s]
[INFO] Phoenix Hadoop2 Compatibility ..................... SUCCESS [3.556s]
[INFO] Phoenix Core ...................................... FAILURE [13:20.300s]
[INFO] Phoenix - Flume ................................... SKIPPED
[INFO] Phoenix - Pig ..................................... SKIPPED
[INFO] Phoenix Assembly .................................. SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 13:31.075s
[INFO] Finished at: Thu Sep 25 10:59:24 UTC 2014
[INFO] Final Memory: 55M/948M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-failsafe-plugin:2.17:verify (ClientManagedTimeTests) on project phoenix-core: There are test failures.
[ERROR] 
[ERROR] Please refer to <https://builds.apache.org/job/Phoenix-master-hadoop1/ws/phoenix-core/target/failsafe-reports> for the individual test results.
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :phoenix-core
Build step 'Invoke top-level Maven targets' marked build as failure
Archiving artifacts
Sending artifact delta relative to Phoenix | Master | Hadoop1 #381
Archived 695 artifacts
Archive block size is 32768
Received 4685 blocks and 256146005 bytes
Compression is 37.5%
Took 1 min 37 sec
Updating PHOENIX-1275
Recording test results
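
For anyone chasing the two PhoenixIO errors locally: the global memory pool is a server-side (region server) setting, normally sized as a percentage of heap. A hedged sketch of where such a knob would be set is below; phoenix.query.maxGlobalMemoryPercentage is the documented Phoenix property for this, but verify the name against QueryServices in the version under test, and note that on a real cluster it belongs in the region servers' hbase-site.xml, not in client code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative only: raise the global memory pool for a local repro of
    // the InsufficientMemoryException above. A 40000-byte pool like the one
    // in this build almost certainly comes from a test-only override, not
    // from this percentage-of-heap default.
    public class MemoryPoolConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("phoenix.query.maxGlobalMemoryPercentage", 25);
            System.out.println("pool % = " + conf.get("phoenix.query.maxGlobalMemoryPercentage"));
        }
    }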