[ https://issues.apache.org/jira/browse/PHOENIX-1521?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
James Taylor resolved PHOENIX-1521.
-----------------------------------
Resolution: Not a Problem
It's entirely possible that the CPU on the region server would go to 100%.
> CPU 100% when using local secondary index.
> ------------------------------------------
>
> Key: PHOENIX-1521
> URL: https://issues.apache.org/jira/browse/PHOENIX-1521
> Project: Phoenix
> Issue Type: Bug
> Affects Versions: 4.2
> Environment: hadoop2.5.1
> hbase-0.98.7-hadoop2
> phoenix-4.2.1
> --------------------------------------------
> 8 Servers(cpu : 24 processors, RAM:64G)
> 16 regionservers (each server 2 regionservers)
> Reporter: wuchengzhi
> Assignee: rajeshbabu
>
> I ran an upsert test with 96 threads in total (12 threads per client) executing
> upsert SQL concurrently, each thread writing 2,000,000 rows committed in batches of 10,000:
> ThreadPoolExecutor executor = new ThreadPoolExecutor(16, 16, 30,
>         TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(50));
> executor.submit(new Callable<Long>() {
>     public Long call() throws Exception {
>         Connection conn = null;
>         // .... connection and PreparedStatement (stmt) setup elided here
>         for (int j = 0; j < 2000000; j++) {
>             // per-row parameter binding and stmt.addBatch() elided here
>             if (j > 0 && j % 10000 == 0) {
>                 stmt.executeBatch();   // flush every 10,000 rows
>                 conn.commit();
>             }
>         }
>         return 2000000L;               // rows written by this worker
>     }
> });
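> For reference, a more complete, self-contained version of that worker might look like
> the sketch below. This is an illustration only: the UpsertWorker class name, the JDBC
> URL / ZooKeeper quorum, and the generated row keys and column values are assumptions,
> not details taken from the original test program.
> 
> import java.sql.Connection;
> import java.sql.DriverManager;
> import java.sql.PreparedStatement;
> import java.util.concurrent.ArrayBlockingQueue;
> import java.util.concurrent.Callable;
> import java.util.concurrent.ThreadPoolExecutor;
> import java.util.concurrent.TimeUnit;
> 
> public class UpsertWorker implements Callable<Long> {
>     // Hypothetical connection string; substitute the real ZooKeeper quorum.
>     private static final String URL = "jdbc:phoenix:zk1,zk2,zk3:2181";
> 
>     public Long call() throws Exception {
>         try (Connection conn = DriverManager.getConnection(URL)) {
>             conn.setAutoCommit(false);
>             PreparedStatement stmt = conn.prepareStatement(
>                 "UPSERT INTO TEST_USER (id, attr1, attr2, attr3, attr4, attr5,"
>                 + " attr6, attr7, attr8, attr9, attr10)"
>                 + " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
>             for (int j = 0; j < 2000000; j++) {
>                 stmt.setString(1, "row-" + Thread.currentThread().getId() + "-" + j);
>                 for (int c = 2; c <= 6; c++) {
>                     stmt.setString(c, "v" + j);   // attr1..attr5 (varchar)
>                 }
>                 for (int c = 7; c <= 11; c++) {
>                     stmt.setInt(c, j);            // attr6..attr10 (integer)
>                 }
>                 stmt.addBatch();
>                 if (j > 0 && j % 10000 == 0) {    // commit in batches of 10,000
>                     stmt.executeBatch();
>                     conn.commit();
>                 }
>             }
>             stmt.executeBatch();                  // flush the final partial batch
>             conn.commit();
>             return 2000000L;
>         }
>     }
> 
>     public static void main(String[] args) {
>         // 12 worker threads per client machine, as described in the report.
>         ThreadPoolExecutor executor = new ThreadPoolExecutor(16, 16, 30,
>                 TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(50));
>         for (int t = 0; t < 12; t++) {
>             executor.submit(new UpsertWorker());
>         }
>         executor.shutdown();
>     }
> }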
> DDL:
> create table TEST_USER (
>     id varchar primary key,
>     attr1 varchar, attr2 varchar, attr3 varchar, attr4 varchar, attr5 varchar,
>     attr6 integer, attr7 integer, attr8 integer, attr9 integer, attr10 integer)
>     DATA_BLOCK_ENCODING='FAST_DIFF', VERSIONS=1, BLOOMFILTER='ROW',
>     COMPRESSION='LZ4', BLOCKSIZE='65536', SALT_BUCKETS=16;
> Without the index, roughly 350,000 rows per second were written into HBase, and CPU
> and memory usage stayed very low.
> Then I recreated the table and created one local index:
> create local index TEST_USER_INDEX on TEST_USER
>     (attr1, attr2, attr3, attr4, attr5, attr6, attr7, attr8, attr9, attr10);
> Using the same client code to send requests, I got only 85,000 rows per second, and
> CPU load was close to 100% on every server.
> Here is the jstack output I captured while the test was running:
> "$MY_SERVER_DNS,60021,1417746400312-index-writer--pool3-t311" daemon prio=10
> tid=0x000000000212a800 nid=0x11887 runnable [0x00007f895f081000]
> java.lang.Thread.State: RUNNABLE
> at
> org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$30.call(RegionCoprocessorHost.java:792)
> at
> org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost$RegionOperation.call(RegionCoprocessorHost.java:1522)
> at
> org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1597)
> at
> org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.execOperation(RegionCoprocessorHost.java:1554)
> at
> org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.prePut(RegionCoprocessorHost.java:788)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.doPreMutationHook(HRegion.java:2284)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2259)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2215)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:2219)
> at
> org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter$1.call(ParallelWriterIndexCommitter.java:156)
> at
> org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter$1.call(ParallelWriterIndexCommitter.java:128)
> at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
> at java.util.concurrent.FutureTask.run(FutureTask.java:166)
> at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
> at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
> at java.lang.Thread.run(Thread.java:724)
>
> "$MY_SERVER_DNS,60021,1417746400312-index-builder--pool2-t319" daemon prio=10
> tid=0x00007f8960273800 nid=0x11847 runnable [0x00007f895e964000]
> java.lang.Thread.State: RUNNABLE
> at org.apache.hadoop.hbase.KeyValue.<init>(KeyValue.java:640)
> at org.apache.hadoop.hbase.KeyValue.<init>(KeyValue.java:568)
> at org.apache.hadoop.hbase.KeyValue.<init>(KeyValue.java:471)
> at org.apache.hadoop.hbase.KeyValue.<init>(KeyValue.java:405)
> at
> org.apache.hadoop.hbase.KeyValue.createFirstOnRow(KeyValue.java:2515)
> at
> org.apache.hadoop.hbase.KeyValue.createFirstOnRow(KeyValue.java:2490)
> at
> org.apache.hadoop.hbase.regionserver.StoreFile$Reader.passesKeyRangeFilter(StoreFile.java:1335)
> at
> org.apache.hadoop.hbase.regionserver.StoreFileScanner.shouldUseScanner(StoreFileScanner.java:403)
> at
> org.apache.hadoop.hbase.regionserver.StoreScanner.selectScannersFrom(StoreScanner.java:363)
> at
> org.apache.hadoop.hbase.regionserver.StoreScanner.getScannersNoCompaction(StoreScanner.java:291)
> at
> org.apache.hadoop.hbase.regionserver.StoreScanner.<init>(StoreScanner.java:170)
> at
> org.apache.hadoop.hbase.regionserver.HStore.getScanner(HStore.java:1816)
> at
> org.apache.hadoop.hbase.regionserver.HRegion$RegionScannerImpl.<init>(HRegion.java:3856)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.instantiateRegionScanner(HRegion.java:1951)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.getScanner(HRegion.java:1937)
> at
> org.apache.hadoop.hbase.regionserver.HRegion.getScanner(HRegion.java:1914)
> at
> org.apache.phoenix.hbase.index.covered.data.LocalTable.getCurrentRowState(LocalTable.java:62)
> at
> org.apache.phoenix.hbase.index.covered.LocalTableState.ensureLocalStateInitialized(LocalTableState.java:158)
> - locked <0x00000002c1c4bae8> (a
> org.apache.phoenix.hbase.index.covered.LocalTableState)
> at
> org.apache.phoenix.hbase.index.covered.LocalTableState.getIndexedColumnsTableState(LocalTableState.java:125)
> at
> org.apache.phoenix.index.PhoenixIndexCodec.getIndexUpdates(PhoenixIndexCodec.java:160)
> at
> org.apache.phoenix.index.PhoenixIndexCodec.getIndexDeletes(PhoenixIndexCodec.java:119)
> at
> org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder.addDeleteUpdatesToMap(CoveredColumnsIndexBuilder.java:403)
> at
> org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder.addCleanupForCurrentBatch(CoveredColumnsIndexBuilder.java:287)
> at
> org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder.addMutationsForBatch(CoveredColumnsIndexBuilder.java:239)
> at
> org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder.batchMutationAndAddUpdates(CoveredColumnsIndexBuilder.java:136)
> at
> org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder.getIndexUpdate(CoveredColumnsIndexBuilder.java:99)
> at
> org.apache.phoenix.hbase.index.builder.IndexBuildManager$1.call(IndexBuildManager.java:133)
> at
> org.apache.phoenix.hbase.index.builder.IndexBuildManager$1.call(IndexBuildManager.java:129)
> at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
> at java.util.concurrent.FutureTask.run(FutureTask.java:166)
> at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
> at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
> at java.lang.Thread.run(Thread.java:724)
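> 
> Reading the second trace: for each row in an incoming batch, the index builder ends up in
> LocalTable.getCurrentRowState, which opens a region scanner over the data table to fetch the
> row's current state before PhoenixIndexCodec.getIndexDeletes can compute the deletes for the
> old index entries. The sketch below illustrates that lookup pattern; it is a simplified
> illustration against the HBase 0.98 coprocessor API, not the actual Phoenix implementation.
> 
> import java.io.IOException;
> import java.util.ArrayList;
> import java.util.List;
> import org.apache.hadoop.hbase.Cell;
> import org.apache.hadoop.hbase.client.Scan;
> import org.apache.hadoop.hbase.regionserver.HRegion;
> import org.apache.hadoop.hbase.regionserver.RegionScanner;
> 
> /** Simplified sketch of the per-row lookup performed while building local index updates. */
> public final class CurrentRowLookup {
> 
>     /** Reads the current cells of one data row from the region hosting it. */
>     static List<Cell> currentRowState(HRegion region, byte[] row) throws IOException {
>         Scan scan = new Scan();
>         scan.setStartRow(row);
>         scan.setStopRow(row);      // start == stop: HBase treats this as a single-row ("get") scan
>         scan.setMaxVersions(1);
>         // This is the HRegion.getScanner(...) call visible in the stack trace; every
>         // store file of the region is consulted while setting up the scanner.
>         RegionScanner scanner = region.getScanner(scan);
>         try {
>             List<Cell> currentRow = new ArrayList<Cell>();
>             scanner.next(currentRow);   // materialize the existing cells for this row
>             return currentRow;
>         } finally {
>             scanner.close();
>         }
>     }
> }
> 
> Doing this row by row under a heavy batch-upsert workload is consistent with the
> KeyValue.createFirstOnRow and StoreFile$Reader.passesKeyRangeFilter frames at the top of the
> trace, and with the CPU saturation reported above once the local index is in place.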