[
https://issues.apache.org/jira/browse/PHOENIX-1950?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
]
tianming updated PHOENIX-1950:
------------------------------
Description:
Creating a Phoenix secondary index fails with an OutOfMemoryError. The problem
occurs when I pre-split the HBase table into 900 regions; with 90 regions the
problem does not occur.
The details are below:
2015-05-04 15:52:18,496 ERROR
[B.DefaultRpcServer.handler=29,queue=5,port=60020] parallel.BaseTaskRunner:
Found a failed task because: java.lang.OutOfMemoryError: unable to create new
native thread
java.util.concurrent.ExecutionException: java.lang.OutOfMemoryError: unable to
create new native thread
at
com.google.common.util.concurrent.AbstractFuture$Sync.getValue(AbstractFuture.java:289)
at
com.google.common.util.concurrent.AbstractFuture$Sync.get(AbstractFuture.java:276)
at
com.google.common.util.concurrent.AbstractFuture.get(AbstractFuture.java:111)
at
org.apache.phoenix.hbase.index.parallel.BaseTaskRunner.submit(BaseTaskRunner.java:66)
at
org.apache.phoenix.hbase.index.parallel.BaseTaskRunner.submitUninterruptible(BaseTaskRunner.java:99)
at
org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter.write(ParallelWriterIndexCommitter.java:192)
at
org.apache.phoenix.hbase.index.write.IndexWriter.write(IndexWriter.java:179)
at
org.apache.phoenix.hbase.index.write.IndexWriter.writeAndKillYourselfOnFailure(IndexWriter.java:144)
at
org.apache.phoenix.hbase.index.write.IndexWriter.writeAndKillYourselfOnFailure(IndexWriter.java:134)
at
org.apache.phoenix.hbase.index.Indexer.doPostWithExceptions(Indexer.java:457)
at org.apache.phoenix.hbase.index.Indexer.doPost(Indexer.java:406)
at
org.apache.phoenix.hbase.index.Indexer.postBatchMutate(Indexer.java:401)
at
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postBatchMutate(RegionCoprocessorHost.java:1311)
at
org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:2985)
at
org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2653)
at
org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2589)
at
org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2593)
at
org.apache.hadoop.hbase.regionserver.HRegionServer.doBatchOp(HRegionServer.java:4402)
at
org.apache.hadoop.hbase.regionserver.HRegionServer.doNonAtomicRegionMutation(HRegionServer.java:3584)
at
org.apache.hadoop.hbase.regionserver.HRegionServer.multi(HRegionServer.java:3474)
at
org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:30000)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2078)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:108)
at
org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
The steps I performed are:
First, I created the HBase table with 900 regions.
Second, the Phoenix client created the table:
ddl: create table VIO_VIOLATION(WFBH VARCHAR PRIMARY KEY,HPHM
VARCHAR ,HPZL VARCHAR,
HPZLMC VARCHAR,JSZH VARCHAR,TELEPHONE VARCHAR,WFSJ VARCHAR,
JBR VARCHAR,CLJGMC VARCHAR)default_column_family='DATA'
third :create index
ddl:create index idx_VIO_VIOLATION on VIO_VIOLATION(WFBH,HPHM,HPZL)
salt_buckets=20
I saw another issue that also reported an OutOfMemoryError, but during queries;
the answers there suggested using no more than 128 threads. I then created the
HBase table with 90 regions, and the problem did not occur.
was:
Creating a Phoenix secondary index fails with an OutOfMemoryError. The problem
occurs when I pre-split the HBase table into 900 regions; with 90 regions the
problem does not occur.
The details are below:
2015-05-04 15:52:18,496 ERROR
[B.DefaultRpcServer.handler=29,queue=5,port=60020] parallel.BaseTaskRunner:
Found a failed task because: java.lang.OutOfMemoryError: unable to create new
native thread
java.util.concurrent.ExecutionException: java.lang.OutOfMemoryError: unable to
create new native thread
at
com.google.common.util.concurrent.AbstractFuture$Sync.getValue(AbstractFuture.java:289)
at
com.google.common.util.concurrent.AbstractFuture$Sync.get(AbstractFuture.java:276)
at
com.google.common.util.concurrent.AbstractFuture.get(AbstractFuture.java:111)
at
org.apache.phoenix.hbase.index.parallel.BaseTaskRunner.submit(BaseTaskRunner.java:66)
at
org.apache.phoenix.hbase.index.parallel.BaseTaskRunner.submitUninterruptible(BaseTaskRunner.java:99)
at
org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter.write(ParallelWriterIndexCommitter.java:192)
at
org.apache.phoenix.hbase.index.write.IndexWriter.write(IndexWriter.java:179)
at
org.apache.phoenix.hbase.index.write.IndexWriter.writeAndKillYourselfOnFailure(IndexWriter.java:144)
at
org.apache.phoenix.hbase.index.write.IndexWriter.writeAndKillYourselfOnFailure(IndexWriter.java:134)
at
org.apache.phoenix.hbase.index.Indexer.doPostWithExceptions(Indexer.java:457)
at org.apache.phoenix.hbase.index.Indexer.doPost(Indexer.java:406)
at
org.apache.phoenix.hbase.index.Indexer.postBatchMutate(Indexer.java:401)
at
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postBatchMutate(RegionCoprocessorHost.java:1311)
at
org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:2985)
at
org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2653)
at
org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2589)
at
org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2593)
at
org.apache.hadoop.hbase.regionserver.HRegionServer.doBatchOp(HRegionServer.java:4402)
at
org.apache.hadoop.hbase.regionserver.HRegionServer.doNonAtomicRegionMutation(HRegionServer.java:3584)
at
org.apache.hadoop.hbase.regionserver.HRegionServer.multi(HRegionServer.java:3474)
at
org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:30000)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2078)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:108)
at
org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.OutOfMemoryError: unable to create new native thread
The steps I performed are:
First, I created the HBase table with 900 regions.
Second, the Phoenix client created the table:
ddl: create table VIO_VIOLATION(
WFBH VARCHAR PRIMARY KEY,HPHM VARCHAR ,HPZL VARCHAR,
HPZLMC VARCHAR,JSZH VARCHAR,TELEPHONE VARCHAR,WFSJ VARCHAR,
JBR VARCHAR,CLJGMC VARCHAR)default_column_family='DATA'
third :create index
ddl:create index idx_VIO_VIOLATION on VIO_VIOLATION(WFBH,HPHM,HPZL)
salt_buckets=20
I saw another issue that also reported an OutOfMemoryError, but during queries;
the answers there suggested using no more than 128 threads, after which the
problem did not occur.
> Creating a Phoenix secondary index causes an OutOfMemoryError
> -------------------------------------------------------------
>
> Key: PHOENIX-1950
> URL: https://issues.apache.org/jira/browse/PHOENIX-1950
> Project: Phoenix
> Issue Type: Bug
> Affects Versions: 4.3.0
> Environment: os :centos 6.5 cpu: 24cores memory:64G
> Reporter: tianming
>
> Creating a Phoenix secondary index fails with an OutOfMemoryError. The problem
> occurs when I pre-split the HBase table into 900 regions; with 90 regions the
> problem does not occur.
> The details are below:
> 2015-05-04 15:52:18,496 ERROR
> [B.DefaultRpcServer.handler=29,queue=5,port=60020] parallel.BaseTaskRunner:
> Found a failed task because: java.lang.OutOfMemoryError: unable to create new
> native thread
> java.util.concurrent.ExecutionException: java.lang.OutOfMemoryError: unable
> to create new native thread
> at
> com.google.common.util.concurrent.AbstractFuture$Sync.getValue(AbstractFuture.java:289)
> at
> com.google.common.util.concurrent.AbstractFuture$Sync.get(AbstractFuture.java:276)
> at
> com.google.common.util.concurrent.AbstractFuture.get(AbstractFuture.java:111)
> at
> org.apache.phoenix.hbase.index.parallel.BaseTaskRunner.submit(BaseTaskRunner.java:66)
> at
> org.apache.phoenix.hbase.index.parallel.BaseTaskRunner.submitUninterruptible(BaseTaskRunner.java:99)
> at
> org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter.write(ParallelWriterIndexCommitter.java:192)
> at
> org.apache.phoenix.hbase.index.write.IndexWriter.write(IndexWriter.java:179)
> at
> org.apache.phoenix.hbase.index.write.IndexWriter.writeAndKillYourselfOnFailure(IndexWriter.java:144)
> at
> org.apache.phoenix.hbase.index.write.IndexWriter.writeAndKillYourselfOnFailure(IndexWriter.java:134)
> at
> org.apache.phoenix.hbase.index.Indexer.doPostWithExceptions(Indexer.java:457)
> at org.apache.phoenix.hbase.index.Indexer.doPost(Indexer.java:406)
> at
> org.apache.phoenix.hbase.index.Indexer.postBatchMutate(Indexer.java:401)
> at
> org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postBatchMutate(RegionCoprocessorHost.java:1311)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.doMiniBatchMutation(HRegion.java:2985)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2653)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2589)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2593)
> at
> org.apache.hadoop.hbase.regionserver.HRegionServer.doBatchOp(HRegionServer.java:4402)
> at
> org.apache.hadoop.hbase.regionserver.HRegionServer.doNonAtomicRegionMutation(HRegionServer.java:3584)
> at
> org.apache.hadoop.hbase.regionserver.HRegionServer.multi(HRegionServer.java:3474)
> at
> org.apache.hadoop.hbase.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:30000)
> at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:2078)
> at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:108)
> at
> org.apache.hadoop.hbase.ipc.RpcExecutor.consumerLoop(RpcExecutor.java:114)
> at org.apache.hadoop.hbase.ipc.RpcExecutor$1.run(RpcExecutor.java:94)
> at java.lang.Thread.run(Thread.java:745)
> Caused by: java.lang.OutOfMemoryError: unable to create new native thread
> The steps I performed are:
> First, I created the HBase table with 900 regions.
> Second, the Phoenix client created the table:
> ddl: create table VIO_VIOLATION(WFBH VARCHAR PRIMARY KEY,HPHM
> VARCHAR ,HPZL VARCHAR,
> HPZLMC VARCHAR,JSZH VARCHAR,TELEPHONE VARCHAR,WFSJ VARCHAR,
> JBR VARCHAR,CLJGMC VARCHAR)default_column_family='DATA'
> third :create index
> ddl:create index idx_VIO_VIOLATION on VIO_VIOLATION(WFBH,HPHM,HPZL)
> salt_buckets=20
> I saw another issue that also reported an OutOfMemoryError, but during queries;
> the answers there suggested using no more than 128 threads. I then created the
> HBase table with 90 regions, and the problem did not occur.
--
This message was sent by Atlassian JIRA
(v6.3.4#6332)