[ https://issues.apache.org/jira/browse/HBASE-14018?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Dinh Duong Mai updated HBASE-14018:
-----------------------------------
    Description: 
+ Pseudo-distributed Hadoop, HBASE_MANAGES_ZK=true (1 master, 1 RegionServer).
+ Wrote data to OpenTSDB at 1,000 records/s for 2,000 seconds.
+ The RegionServer aborted.
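
For context, the write load above was produced against OpenTSDB; the exact ingest client is not recorded in this report, so the following is only a minimal illustrative sketch of how such a load could be generated (the metric name, tags, and the localhost:4242 /api/put endpoint are assumptions, not details taken from the report).

=== Illustrative load-generator sketch (not from the report) ===
#!/usr/bin/env python3
# Hypothetical sketch of the reproduction load: push roughly 1000 data points
# per second to OpenTSDB's HTTP /api/put endpoint for 2000 seconds.
# The metric name, tag values, and the localhost:4242 endpoint are assumptions.
import json
import random
import time
import urllib.request

TSD_URL = "http://localhost:4242/api/put"  # assumed OpenTSDB HTTP API endpoint
RATE = 1000       # data points per second, as described above
DURATION = 2000   # seconds, as described above

def send_batch(points):
    """POST one JSON batch of data points to OpenTSDB."""
    req = urllib.request.Request(
        TSD_URL,
        data=json.dumps(points).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    urllib.request.urlopen(req).close()

start = time.time()
for second in range(DURATION):
    ts = int(time.time())
    batch = [
        {
            "metric": "sys.test.value",             # placeholder metric
            "timestamp": ts,
            "value": random.random() * 100,
            "tags": {"host": "node%d" % (i % 10)},  # placeholder tag
        }
        for i in range(RATE)
    ]
    send_batch(batch)
    # Pace the loop to roughly one batch per second.
    time.sleep(max(0.0, start + second + 1 - time.time()))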

=== RegionServer logs ===
2015-07-03 16:37:37,332 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: 
totalSize=371.27 KB, freeSize=181.41 MB, max=181.78 MB, blockCount=5, 
accesses=1623, hits=172, hitRatio=10.60%, , cachingAccesses=177, 
cachingHits=151, cachingHitsRatio=85.31%, evictions=1139, evicted=21, 
evictedPerRun=0.018437225371599197
2015-07-03 16:37:37,898 INFO  [node1:16040Replication Statistics #0] 
regionserver.Replication: Normal source for cluster 1: Total replicated edits: 
2744, currently replicating from: 
hdfs://node1.vmcluster:9000/hbase/WALs/node1.vmcluster,16040,1435897652505/node1.vmcluster%2C16040%2C1435897652505.default.1435908458590
 at position: 19207814

2015-07-03 16:42:37,331 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: 
totalSize=371.27 KB, freeSize=181.41 MB, max=181.78 MB, blockCount=5, 
accesses=1624, hits=173, hitRatio=10.65%, , cachingAccesses=178, 
cachingHits=152, cachingHitsRatio=85.39%, evictions=1169, evicted=21, 
evictedPerRun=0.01796407252550125
2015-07-03 16:42:37,899 INFO  [node1:16040Replication Statistics #0] 
regionserver.Replication: Normal source for cluster 1: Total replicated edits: 
3049, currently replicating from: 
hdfs://node1.vmcluster:9000/hbase/WALs/node1.vmcluster,16040,1435897652505/node1.vmcluster%2C16040%2C1435897652505.default.1435908458590
 at position: 33026416

2015-07-03 16:43:27,217 INFO  [MemStoreFlusher.1] regionserver.HRegion: Started 
memstore flush for tsdb,,1435897759785.2d49cd81fb6513f51af58bd0394c4e0d., 
current region memstore size 128.05 MB
2015-07-03 16:43:27,899 FATAL [MemStoreFlusher.1] regionserver.HRegionServer: 
ABORTING region server node1.vmcluster,16040,1435897652505: Replay of WAL 
required. Forcing server shutdown
org.apache.hadoop.hbase.DroppedSnapshotException: region: 
tsdb,,1435897759785.2d49cd81fb6513f51af58bd0394c4e0d.
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2001)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1772)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:1704)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:445)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:407)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.access$800(MemStoreFlusher.java:69)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:225)
        at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.ArrayIndexOutOfBoundsException: -32743
        at 
org.apache.hadoop.hbase.CellComparator.getMinimumMidpointArray(CellComparator.java:478)
        at 
org.apache.hadoop.hbase.CellComparator.getMidpoint(CellComparator.java:448)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.finishBlock(HFileWriterV2.java:165)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.checkBlockBoundary(HFileWriterV2.java:146)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.append(HFileWriterV2.java:263)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV3.append(HFileWriterV3.java:87)
        at 
org.apache.hadoop.hbase.regionserver.StoreFile$Writer.append(StoreFile.java:932)
        at 
org.apache.hadoop.hbase.regionserver.StoreFlusher.performFlush(StoreFlusher.java:121)
        at 
org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher.flushSnapshot(DefaultStoreFlusher.java:71)
        at 
org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:879)
        at 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:2128)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1955)
        ... 7 more
2015-07-03 16:43:27,901 FATAL [MemStoreFlusher.1] regionserver.HRegionServer: 
RegionServer abort: loaded coprocessors are: 
[org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint]

=== HMaster logs ===
2015-07-03 13:29:20,671 INFO  [RegionOpenAndInitThread-tsdb-meta-1] 
regionserver.HRegion: creating HRegion tsdb-meta HTD == 'tsdb-meta', {NAME => 
'name', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE => 'true', BLOCKSIZE => 
'65536', REPLICATION_SCOPE => '1'} RootDir = 
hdfs://node1.vmcluster:9000/hbase/.tmp Table name == tsdb-meta
2015-07-03 13:29:20,696 INFO  [RegionOpenAndInitThread-tsdb-meta-1] 
regionserver.HRegion: Closed 
tsdb-meta,,1435897760624.0738e3fac8ffe40d656dc91588a47aac.
2015-07-03 13:29:20,703 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
hbase.MetaTableAccessor: Added 1
2015-07-03 13:29:20,704 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
master.AssignmentManager: Assigning 1 region(s) to 
node1.vmcluster,16040,1435897652505
2015-07-03 13:29:20,717 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
master.RegionStates: Transition {0738e3fac8ffe40d656dc91588a47aac 
state=OFFLINE, ts=1435897760704, server=null} to 
{0738e3fac8ffe40d656dc91588a47aac state=PENDING_OPEN, ts=1435897760717, 
server=node1.vmcluster,16040,1435897652505}
2015-07-03 13:29:20,729 WARN  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
zookeeper.ZKTableStateManager: Moving table tsdb-meta state from ENABLING to 
ENABLED
2015-07-03 13:29:20,734 INFO  [AM.ZK.Worker-pool2-t33] master.RegionStates: 
Transition {0738e3fac8ffe40d656dc91588a47aac state=PENDING_OPEN, 
ts=1435897760717, server=node1.vmcluster,16040,1435897652505} to 
{0738e3fac8ffe40d656dc91588a47aac state=OPENING, ts=1435897760734, 
server=node1.vmcluster,16040,1435897652505}
2015-07-03 13:29:20,748 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
handler.CreateTableHandler: failed. null
2015-07-03 13:29:20,772 INFO  [AM.ZK.Worker-pool2-t35] master.RegionStates: 
Transition {0738e3fac8ffe40d656dc91588a47aac state=OPENING, ts=1435897760734, 
server=node1.vmcluster,16040,1435897652505} to 
{0738e3fac8ffe40d656dc91588a47aac state=OPEN, ts=1435897760772, 
server=node1.vmcluster,16040,1435897652505}
2015-07-03 13:29:20,774 INFO  [AM.ZK.Worker-pool2-t35] master.RegionStates: 
Onlined 0738e3fac8ffe40d656dc91588a47aac on node1.vmcluster,16040,1435897652505
2015-07-03 16:43:27,970 ERROR 
[B.defaultRpcServer.handler=15,queue=0,port=16020] master.MasterRpcServices: 
Region server node1.vmcluster,16040,1435897652505 reported a fatal error:
ABORTING region server node1.vmcluster,16040,1435897652505: Replay of WAL 
required. Forcing server shutdown
Cause:
org.apache.hadoop.hbase.DroppedSnapshotException: region: 
tsdb,,1435897759785.2d49cd81fb6513f51af58bd0394c4e0d.
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2001)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1772)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:1704)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:445)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:407)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.access$800(MemStoreFlusher.java:69)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:225)
        at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.ArrayIndexOutOfBoundsException: -32743
        at 
org.apache.hadoop.hbase.CellComparator.getMinimumMidpointArray(CellComparator.java:478)
        at 
org.apache.hadoop.hbase.CellComparator.getMidpoint(CellComparator.java:448)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.finishBlock(HFileWriterV2.java:165)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.checkBlockBoundary(HFileWriterV2.java:146)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.append(HFileWriterV2.java:263)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV3.append(HFileWriterV3.java:87)
        at 
org.apache.hadoop.hbase.regionserver.StoreFile$Writer.append(StoreFile.java:932)
        at 
org.apache.hadoop.hbase.regionserver.StoreFlusher.performFlush(StoreFlusher.java:121)
        at 
org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher.flushSnapshot(DefaultStoreFlusher.java:71)
        at 
org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:879)
        at 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:2128)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1955)
        ... 7 more

2015-07-03 16:43:32,595 INFO  [main-EventThread] zookeeper.RegionServerTracker: 
RegionServer ephemeral node deleted, processing expiration 
[node1.vmcluster,16040,1435897652505]
2015-07-03 16:43:32,611 INFO  [MASTER_META_SERVER_OPERATIONS-node1:16020-0] 
handler.MetaServerShutdownHandler: Splitting hbase:meta logs for 
node1.vmcluster,16040,1435897652505
2015-07-03 16:43:32,627 INFO  [MASTER_META_SERVER_OPERATIONS-node1:16020-0] 
master.SplitLogManager: dead splitlog workers 
[node1.vmcluster,16040,1435897652505]
2015-07-03 16:43:32,630 INFO  [MASTER_META_SERVER_OPERATIONS-node1:16020-0] 
master.SplitLogManager: started splitting 1 logs in 
[hdfs://node1.vmcluster:9000/hbase/WALs/node1.vmcluster,16040,1435897652505-splitting]
 for [node1.vmcluster,16040,1435897652505]

  was:
+ Pseudo-distributed Hadoop, HBASE_MANAGES_ZK=true (1 master, 1 RegionServer).
+ Wrote data to OpenTSDB at 1,000 records/s for 2,000 seconds.
+ The RegionServer aborted.

=== RegionServer logs ===
2015-07-03 16:37:37,332 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: 
totalSize=371.27 KB, freeSize=181.41 MB, max=181.78 MB, blockCount=5, 
accesses=1623, hits=172, hitRatio=10.60%, , cachingAccesses=177, 
cachingHits=151, cachingHitsRatio=85.31%, evictions=1139, evicted=21, 
evictedPerRun=0.018437225371599197
2015-07-03 16:37:37,898 INFO  [node1:16040Replication Statistics #0] 
regionserver.Replication: Normal source for cluster 1: Total replicated edits: 
2744, currently replicating from: 
hdfs://node1.vmcluster:9000/hbase/WALs/node1.vmcluster,16040,1435897652505/node1.vmcluster%2C16040%2C1435897652505.default.1435908458590
 at position: 19207814

2015-07-03 16:42:37,331 INFO  [LruBlockCacheStatsExecutor] hfile.LruBlockCache: 
totalSize=371.27 KB, freeSize=181.41 MB, max=181.78 MB, blockCount=5, 
accesses=1624, hits=173, hitRatio=10.65%, , cachingAccesses=178, 
cachingHits=152, cachingHitsRatio=85.39%, evictions=1169, evicted=21, 
evictedPerRun=0.01796407252550125
2015-07-03 16:42:37,899 INFO  [node1:16040Replication Statistics #0] 
regionserver.Replication: Normal source for cluster 1: Total replicated edits: 
3049, currently replicating from: 
hdfs://node1.vmcluster:9000/hbase/WALs/node1.vmcluster,16040,1435897652505/node1.vmcluster%2C16040%2C1435897652505.default.1435908458590
 at position: 33026416

2015-07-03 16:43:27,217 INFO  [MemStoreFlusher.1] regionserver.HRegion: Started 
memstore flush for tsdb,,1435897759785.2d49cd81fb6513f51af58bd0394c4e0d., 
current region memstore size 128.05 MB
2015-07-03 16:43:27,899 FATAL [MemStoreFlusher.1] regionserver.HRegionServer: 
ABORTING region server node1.vmcluster,16040,1435897652505: Replay of WAL 
required. Forcing server shutdown
org.apache.hadoop.hbase.DroppedSnapshotException: region: 
tsdb,,1435897759785.2d49cd81fb6513f51af58bd0394c4e0d.
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2001)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1772)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:1704)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:445)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:407)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.access$800(MemStoreFlusher.java:69)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:225)
        at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.ArrayIndexOutOfBoundsException: -32743
        at 
org.apache.hadoop.hbase.CellComparator.getMinimumMidpointArray(CellComparator.java:478)
        at 
org.apache.hadoop.hbase.CellComparator.getMidpoint(CellComparator.java:448)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.finishBlock(HFileWriterV2.java:165)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.checkBlockBoundary(HFileWriterV2.java:146)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.append(HFileWriterV2.java:263)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV3.append(HFileWriterV3.java:87)
        at 
org.apache.hadoop.hbase.regionserver.StoreFile$Writer.append(StoreFile.java:932)
        at 
org.apache.hadoop.hbase.regionserver.StoreFlusher.performFlush(StoreFlusher.java:121)
        at 
org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher.flushSnapshot(DefaultStoreFlusher.java:71)
        at 
org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:879)
        at 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:2128)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1955)
        ... 7 more
2015-07-03 16:43:27,901 FATAL [MemStoreFlusher.1] regionserver.HRegionServer: 
RegionServer abort: loaded coprocessors are: 
[org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint]
2015-07-03 16:43:27,942 INFO  [MemStoreFlusher.1] regionserver.HRegionServer: 
Dump of metrics as JSON on abort: {
  "beans" : [ {
    "name" : "java.lang:type=Memory",
    "modelerType" : "sun.management.MemoryImpl",
    "HeapMemoryUsage" : {
      "committed" : 170471424,
      "init" : 31457280,
      "max" : 476512256,
      "used" : 151515136
    },
    "Verbose" : false,
    "ObjectPendingFinalizationCount" : 0,
    "NonHeapMemoryUsage" : {
      "committed" : 72335360,
      "init" : 2555904,
      "max" : -1,
      "used" : 70646296
    },
    "ObjectName" : "java.lang:type=Memory"
  } ],
  "beans" : [ {
    "name" : "Hadoop:service=HBase,name=RegionServer,sub=IPC",
    "modelerType" : "RegionServer,sub=IPC",
    "tag.Context" : "regionserver",
    "tag.Hostname" : "node1.vmcluster",
    "queueSize" : 0,
    "numCallsInGeneralQueue" : 0,
    "numCallsInReplicationQueue" : 0,
    "numCallsInPriorityQueue" : 0,
    "numOpenConnections" : 2,
    "numActiveHandler" : 0,
    "receivedBytes" : 147185301,
    "authenticationSuccesses" : 0,
    "authorizationFailures" : 0,
    "ProcessCallTime_num_ops" : 3610,
    "ProcessCallTime_min" : 0,
    "ProcessCallTime_max" : 92,
    "ProcessCallTime_mean" : 12.251523545706371,
    "ProcessCallTime_median" : 13.0,
    "ProcessCallTime_75th_percentile" : 21.0,
    "ProcessCallTime_95th_percentile" : 30.0,
    "ProcessCallTime_99th_percentile" : 38.0,
    "authorizationSuccesses" : 45,
    "sentBytes" : 20484419,
    "QueueCallTime_num_ops" : 3610,
    "QueueCallTime_min" : 0,
    "QueueCallTime_max" : 17,
    "QueueCallTime_mean" : 0.32936288088642657,
    "QueueCallTime_median" : 0.0,
    "QueueCallTime_75th_percentile" : 1.0,
    "QueueCallTime_95th_percentile" : 1.0,
    "QueueCallTime_99th_percentile" : 2.0,
    "authenticationFailures" : 0
  } ],
  "beans" : [ {
    "name" : "Hadoop:service=HBase,name=RegionServer,sub=Replication",
    "modelerType" : "RegionServer,sub=Replication",
    "tag.Context" : "regionserver",
    "tag.Hostname" : "node1.vmcluster",
    "source.1.ageOfLastShippedOp" : 0,
    "source.1.logEditsRead" : 3123,
    "source.1.logReadInBytes" : 128637718,
    "source.shippedOps" : 2733185,
    "source.1.shippedKBs" : 303650,
    "source.ageOfLastShippedOp" : 0,
    "source.logEditsFiltered" : 23,
    "source.shippedKBs" : 303650,
    "source.logReadInBytes" : 128637718,
    "source.1.sizeOfLogQueue" : 0,
    "sink.appliedOps" : 0,
    "source.1.shippedOps" : 2733185,
    "source.sizeOfLogQueue" : 0,
    "source.1.logEditsFiltered" : 23,
    "source.shippedBatches" : 2006,
    "sink.ageOfLastAppliedOp" : 0,
    "source.1.shippedBatches" : 2006,
    "source.logEditsRead" : 3123,
    "sink.appliedBatches" : 0
  } ],
  "beans" : [ {
    "name" : "Hadoop:service=HBase,name=RegionServer,sub=Server",
    "modelerType" : "RegionServer,sub=Server",
    "tag.zookeeperQuorum" : "node1.vmcluster:2181",
    "tag.serverName" : "node1.vmcluster,16040,1435897652505",
    "tag.clusterId" : "af59ef2f-210a-4bbe-93f2-2a219e34907c",
    "tag.Context" : "regionserver",
    "tag.Hostname" : "node1.vmcluster",
    "regionCount" : 6,
    "storeCount" : 7,
    "hlogFileCount" : 2,
    "hlogFileSize" : 0,
    "storeFileCount" : 5,
    "memStoreSize" : 133737768,
    "storeFileSize" : 89254719,
    "regionServerStartTime" : 1435897652505,
    "totalRequestCount" : 2759417,
    "readRequestCount" : 777,
    "writeRequestCount" : 2755437,
    "checkMutateFailedCount" : 0,
    "checkMutatePassedCount" : 204,
    "storeFileIndexSize" : 105176,
    "staticIndexSize" : 57108,
    "staticBloomSize" : 388,
    "mutationsWithoutWALCount" : 0,
    "mutationsWithoutWALSize" : 0,
    "percentFilesLocal" : 100,
    "splitQueueLength" : 0,
    "compactionQueueLength" : 0,
    "flushQueueLength" : 0,
    "blockCacheFreeSize" : 190224736,
    "blockCacheCount" : 5,
    "blockCacheSize" : 380176,
    "blockCacheHitCount" : 174,
    "blockCacheMissCount" : 1451,
    "blockCacheEvictionCount" : 21,
    "blockCacheCountHitPercent" : 10.0,
    "blockCacheExpressHitPercent" : 85,
    "updatesBlockedTime" : 0,
    "flushedCellsCount" : 2020353,
    "compactedCellsCount" : 1980128,
    "majorCompactedCellsCount" : 0,
    "flushedCellsSize" : 355701760,
    "compactedCellsSize" : 83327014,
    "majorCompactedCellsSize" : 0,
    "blockedRequestCount" : 0,
    "Mutate_num_ops" : 2796,
    "Mutate_min" : 1,
    "Mutate_max" : 59,
    "Mutate_mean" : 15.11552217453505,
    "Mutate_median" : 14.0,
    "Mutate_75th_percentile" : 21.0,
    "Mutate_95th_percentile" : 30.0,
    "Mutate_99th_percentile" : 38.0,
    "slowAppendCount" : 0,
    "slowDeleteCount" : 0,
    "Increment_num_ops" : 102,
    "Increment_min" : 1,
    "Increment_max" : 13,
    "Increment_mean" : 1.892156862745098,
    "Increment_median" : 1.5,
    "Increment_75th_percentile" : 2.0,
    "Increment_95th_percentile" : 5.0,
    "Increment_99th_percentile" : 12.819999999999993,
    "Replay_num_ops" : 0,
    "Replay_min" : 0,
    "Replay_max" : 0,
    "Replay_mean" : 0.0,
    "Replay_median" : 0.0,
    "Replay_75th_percentile" : 0.0,
    "Replay_95th_percentile" : 0.0,
    "Replay_99th_percentile" : 0.0,
    "FlushTime_num_ops" : 5,
    "FlushTime_min" : 973,
    "FlushTime_max" : 20521,
    "FlushTime_mean" : 10675.4,
    "FlushTime_median" : 15223.0,
    "FlushTime_75th_percentile" : 18014.0,
    "FlushTime_95th_percentile" : 20521.0,
    "FlushTime_99th_percentile" : 20521.0,
    "Delete_num_ops" : 7,
    "Delete_min" : 1,
    "Delete_max" : 60,
    "Delete_mean" : 26.285714285714285,
    "Delete_median" : 5.0,
    "Delete_75th_percentile" : 59.0,
    "Delete_95th_percentile" : 60.0,
    "Delete_99th_percentile" : 60.0,
    "splitRequestCount" : 0,
    "splitSuccessCount" : 0,
    "slowGetCount" : 0,
    "Get_num_ops" : 108,
    "Get_min" : 0,
    "Get_max" : 17,
    "Get_mean" : 0.5833333333333334,
    "Get_median" : 0.0,
    "Get_75th_percentile" : 1.0,
    "Get_95th_percentile" : 2.6499999999999915,
    "Get_99th_percentile" : 16.009999999999962,
    "slowPutCount" : 0,
    "slowIncrementCount" : 0,
    "Append_num_ops" : 0,
    "Append_min" : 0,
    "Append_max" : 0,
    "Append_mean" : 0.0,
    "Append_median" : 0.0,
    "Append_75th_percentile" : 0.0,
    "Append_95th_percentile" : 0.0,
    "Append_99th_percentile" : 0.0,
    "SplitTime_num_ops" : 0,
    "SplitTime_min" : 0,
    "SplitTime_max" : 0,
    "SplitTime_mean" : 0.0,
    "SplitTime_median" : 0.0,
    "SplitTime_75th_percentile" : 0.0,
    "SplitTime_95th_percentile" : 0.0,
    "SplitTime_99th_percentile" : 0.0
  } ]
}
2015-07-03 16:43:27,974 INFO  [MemStoreFlusher.1] regionserver.HRegionServer: 
STOPPED: Replay of WAL required. Forcing server shutdown
2015-07-03 16:43:27,975 INFO  
[regionserver/node1.vmcluster/192.168.56.101:16040] 
regionserver.SplitLogWorker: Sending interrupt to stop the worker thread
2015-07-03 16:43:27,976 INFO  
[regionserver/node1.vmcluster/192.168.56.101:16040] regionserver.HRegionServer: 
Stopping infoServer
2015-07-03 16:43:27,976 INFO  [SplitLogWorker-node1:16040] 
regionserver.SplitLogWorker: SplitLogWorker interrupted. Exiting. 

=== HMaster logs ===
2015-07-03 13:29:20,671 INFO  [RegionOpenAndInitThread-tsdb-meta-1] 
regionserver.HRegion: creating HRegion tsdb-meta HTD == 'tsdb-meta', {NAME => 
'name', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE => 'true', BLOCKSIZE => 
'65536', REPLICATION_SCOPE => '1'} RootDir = 
hdfs://node1.vmcluster:9000/hbase/.tmp Table name == tsdb-meta
2015-07-03 13:29:20,696 INFO  [RegionOpenAndInitThread-tsdb-meta-1] 
regionserver.HRegion: Closed 
tsdb-meta,,1435897760624.0738e3fac8ffe40d656dc91588a47aac.
2015-07-03 13:29:20,703 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
hbase.MetaTableAccessor: Added 1
2015-07-03 13:29:20,704 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
master.AssignmentManager: Assigning 1 region(s) to 
node1.vmcluster,16040,1435897652505
2015-07-03 13:29:20,717 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
master.RegionStates: Transition {0738e3fac8ffe40d656dc91588a47aac 
state=OFFLINE, ts=1435897760704, server=null} to 
{0738e3fac8ffe40d656dc91588a47aac state=PENDING_OPEN, ts=1435897760717, 
server=node1.vmcluster,16040,1435897652505}
2015-07-03 13:29:20,729 WARN  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
zookeeper.ZKTableStateManager: Moving table tsdb-meta state from ENABLING to 
ENABLED
2015-07-03 13:29:20,734 INFO  [AM.ZK.Worker-pool2-t33] master.RegionStates: 
Transition {0738e3fac8ffe40d656dc91588a47aac state=PENDING_OPEN, 
ts=1435897760717, server=node1.vmcluster,16040,1435897652505} to 
{0738e3fac8ffe40d656dc91588a47aac state=OPENING, ts=1435897760734, 
server=node1.vmcluster,16040,1435897652505}
2015-07-03 13:29:20,748 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
handler.CreateTableHandler: failed. null
2015-07-03 13:29:20,772 INFO  [AM.ZK.Worker-pool2-t35] master.RegionStates: 
Transition {0738e3fac8ffe40d656dc91588a47aac state=OPENING, ts=1435897760734, 
server=node1.vmcluster,16040,1435897652505} to 
{0738e3fac8ffe40d656dc91588a47aac state=OPEN, ts=1435897760772, 
server=node1.vmcluster,16040,1435897652505}
2015-07-03 13:29:20,774 INFO  [AM.ZK.Worker-pool2-t35] master.RegionStates: 
Onlined 0738e3fac8ffe40d656dc91588a47aac on node1.vmcluster,16040,1435897652505
2015-07-03 16:43:27,970 ERROR 
[B.defaultRpcServer.handler=15,queue=0,port=16020] master.MasterRpcServices: 
Region server node1.vmcluster,16040,1435897652505 reported a fatal error:
ABORTING region server node1.vmcluster,16040,1435897652505: Replay of WAL 
required. Forcing server shutdown
Cause:
org.apache.hadoop.hbase.DroppedSnapshotException: region: 
tsdb,,1435897759785.2d49cd81fb6513f51af58bd0394c4e0d.
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2001)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1772)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:1704)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:445)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:407)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher.access$800(MemStoreFlusher.java:69)
        at 
org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:225)
        at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.ArrayIndexOutOfBoundsException: -32743
        at 
org.apache.hadoop.hbase.CellComparator.getMinimumMidpointArray(CellComparator.java:478)
        at 
org.apache.hadoop.hbase.CellComparator.getMidpoint(CellComparator.java:448)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.finishBlock(HFileWriterV2.java:165)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.checkBlockBoundary(HFileWriterV2.java:146)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV2.append(HFileWriterV2.java:263)
        at 
org.apache.hadoop.hbase.io.hfile.HFileWriterV3.append(HFileWriterV3.java:87)
        at 
org.apache.hadoop.hbase.regionserver.StoreFile$Writer.append(StoreFile.java:932)
        at 
org.apache.hadoop.hbase.regionserver.StoreFlusher.performFlush(StoreFlusher.java:121)
        at 
org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher.flushSnapshot(DefaultStoreFlusher.java:71)
        at 
org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:879)
        at 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:2128)
        at 
org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1955)
        ... 7 more

2015-07-03 16:43:32,595 INFO  [main-EventThread] zookeeper.RegionServerTracker: 
RegionServer ephemeral node deleted, processing expiration 
[node1.vmcluster,16040,1435897652505]
2015-07-03 16:43:32,611 INFO  [MASTER_META_SERVER_OPERATIONS-node1:16020-0] 
handler.MetaServerShutdownHandler: Splitting hbase:meta logs for 
node1.vmcluster,16040,1435897652505
2015-07-03 16:43:32,627 INFO  [MASTER_META_SERVER_OPERATIONS-node1:16020-0] 
master.SplitLogManager: dead splitlog workers 
[node1.vmcluster,16040,1435897652505]
2015-07-03 16:43:32,630 INFO  [MASTER_META_SERVER_OPERATIONS-node1:16020-0] 
master.SplitLogManager: started splitting 1 logs in 
[hdfs://node1.vmcluster:9000/hbase/WALs/node1.vmcluster,16040,1435897652505-splitting]
 for [node1.vmcluster,16040,1435897652505]
2015-07-03 16:43:33,102 INFO  
[node1.vmcluster,16020,1435897650549.splitLogManagerTimeoutMonitor] 
master.SplitLogManager: total tasks = 1 unassigned = 1 
tasks={/hbase/splitWAL/WALs%2Fnode1.vmcluster%2C16040%2C1435897652505-splitting%2Fnode1.vmcluster%252C16040%252C1435897652505..meta.1435908462449.meta=last_update
 = -1 last_version = -1 cur_worker_name = null status = in_progress incarnation 
= 0 resubmits = 0 batch = installed = 1 done = 0 error = 0}
2015-07-03 16:43:38,103 INFO  
[node1.vmcluster,16020,1435897650549.splitLogManagerTimeoutMonitor] 
master.SplitLogManager: total tasks = 1 unassigned = 1 
tasks={/hbase/splitWAL/WALs%2Fnode1.vmcluster%2C16040%2C1435897652505-splitting%2Fnode1.vmcluster%252C16040%252C1435897652505..meta.1435908462449.meta=last_update
 = -1 last_version = -1 cur_worker_name = null status = in_progress incarnation 
= 0 resubmits = 0 batch = installed = 1 done = 0 error = 0}
2015-07-03 16:43:43,105 INFO  
[node1.vmcluster,16020,1435897650549.splitLogManagerTimeoutMonitor] 
master.SplitLogManager: total tasks = 1 unassigned = 1 
tasks={/hbase/splitWAL/WALs%2Fnode1.vmcluster%2C16040%2C1435897652505-splitting%2Fnode1.vmcluster%252C16040%252C1435897652505..meta.1435908462449.meta=last_update
 = -1 last_version = -1 cur_worker_name = null status = in_progress incarnation 
= 0 resubmits = 0 batch = installed = 1 done = 0 error = 0}
2015-07-03 16:43:48,107 INFO  
[node1.vmcluster,16020,1435897650549.splitLogManagerTimeoutMonitor] 
master.SplitLogManager: total tasks = 1 unassigned = 1 
tasks={/hbase/splitWAL/WALs%2Fnode1.vmcluster%2C16040%2C1435897652505-splitting%2Fnode1.vmcluster%252C16040%252C1435897652505..meta.1435908462449.meta=last_update
 = -1 last_version = -1 cur_worker_name = null status = in_progress incarnation 
= 0 resubmits = 0 batch = installed = 1 done = 0 error = 0}
2015-07-03 16:43:53,108 INFO  
[node1.vmcluster,16020,1435897650549.splitLogManagerTimeoutMonitor] 
master.SplitLogManager: total tasks = 1 unassigned = 1 
tasks={/hbase/splitWAL/WALs%2Fnode1.vmcluster%2C16040%2C1435897652505-splitting%2Fnode1.vmcluster%252C16040%252C1435897652505..meta.1435908462449.meta=last_update
 = -1 last_version = -1 cur_worker_name = null status = in_progress incarnation 
= 0 resubmits = 0 batch = installed = 1 done = 0 error = 0}



> RegionServer is aborted when flushing memstore.
> -----------------------------------------------
>
>                 Key: HBASE-14018
>                 URL: https://issues.apache.org/jira/browse/HBASE-14018
>             Project: HBase
>          Issue Type: Bug
>    Affects Versions: 1.0.1.1
>         Environment: CentOS x64 Server
>            Reporter: Dinh Duong Mai
>
> + Pseudo-distributed Hadoop, HBASE_MANAGES_ZK=true (1 master, 1 RegionServer).
> + Wrote data to OpenTSDB at 1,000 records/s for 2,000 seconds.
> + The RegionServer aborted.
> === RegionServer logs ===
> 2015-07-03 16:37:37,332 INFO  [LruBlockCacheStatsExecutor] 
> hfile.LruBlockCache: totalSize=371.27 KB, freeSize=181.41 MB, max=181.78 MB, 
> blockCount=5, accesses=1623, hits=172, hitRatio=10.60%, , 
> cachingAccesses=177, cachingHits=151, cachingHitsRatio=85.31%, 
> evictions=1139, evicted=21, evictedPerRun=0.018437225371599197
> 2015-07-03 16:37:37,898 INFO  [node1:16040Replication Statistics #0] 
> regionserver.Replication: Normal source for cluster 1: Total replicated 
> edits: 2744, currently replicating from: 
> hdfs://node1.vmcluster:9000/hbase/WALs/node1.vmcluster,16040,1435897652505/node1.vmcluster%2C16040%2C1435897652505.default.1435908458590
>  at position: 19207814
> 2015-07-03 16:42:37,331 INFO  [LruBlockCacheStatsExecutor] 
> hfile.LruBlockCache: totalSize=371.27 KB, freeSize=181.41 MB, max=181.78 MB, 
> blockCount=5, accesses=1624, hits=173, hitRatio=10.65%, , 
> cachingAccesses=178, cachingHits=152, cachingHitsRatio=85.39%, 
> evictions=1169, evicted=21, evictedPerRun=0.01796407252550125
> 2015-07-03 16:42:37,899 INFO  [node1:16040Replication Statistics #0] 
> regionserver.Replication: Normal source for cluster 1: Total replicated 
> edits: 3049, currently replicating from: 
> hdfs://node1.vmcluster:9000/hbase/WALs/node1.vmcluster,16040,1435897652505/node1.vmcluster%2C16040%2C1435897652505.default.1435908458590
>  at position: 33026416
> 2015-07-03 16:43:27,217 INFO  [MemStoreFlusher.1] regionserver.HRegion: 
> Started memstore flush for 
> tsdb,,1435897759785.2d49cd81fb6513f51af58bd0394c4e0d., current region 
> memstore size 128.05 MB
> 2015-07-03 16:43:27,899 FATAL [MemStoreFlusher.1] regionserver.HRegionServer: 
> ABORTING region server node1.vmcluster,16040,1435897652505: Replay of WAL 
> required. Forcing server shutdown
> org.apache.hadoop.hbase.DroppedSnapshotException: region: 
> tsdb,,1435897759785.2d49cd81fb6513f51af58bd0394c4e0d.
>       at 
> org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2001)
>       at 
> org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1772)
>       at 
> org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:1704)
>       at 
> org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:445)
>       at 
> org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:407)
>       at 
> org.apache.hadoop.hbase.regionserver.MemStoreFlusher.access$800(MemStoreFlusher.java:69)
>       at 
> org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:225)
>       at java.lang.Thread.run(Thread.java:745)
> Caused by: java.lang.ArrayIndexOutOfBoundsException: -32743
>       at 
> org.apache.hadoop.hbase.CellComparator.getMinimumMidpointArray(CellComparator.java:478)
>       at 
> org.apache.hadoop.hbase.CellComparator.getMidpoint(CellComparator.java:448)
>       at 
> org.apache.hadoop.hbase.io.hfile.HFileWriterV2.finishBlock(HFileWriterV2.java:165)
>       at 
> org.apache.hadoop.hbase.io.hfile.HFileWriterV2.checkBlockBoundary(HFileWriterV2.java:146)
>       at 
> org.apache.hadoop.hbase.io.hfile.HFileWriterV2.append(HFileWriterV2.java:263)
>       at 
> org.apache.hadoop.hbase.io.hfile.HFileWriterV3.append(HFileWriterV3.java:87)
>       at 
> org.apache.hadoop.hbase.regionserver.StoreFile$Writer.append(StoreFile.java:932)
>       at 
> org.apache.hadoop.hbase.regionserver.StoreFlusher.performFlush(StoreFlusher.java:121)
>       at 
> org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher.flushSnapshot(DefaultStoreFlusher.java:71)
>       at 
> org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:879)
>       at 
> org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:2128)
>       at 
> org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1955)
>       ... 7 more
> 2015-07-03 16:43:27,901 FATAL [MemStoreFlusher.1] regionserver.HRegionServer: 
> RegionServer abort: loaded coprocessors are: 
> [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint]
> === HMaster logs ===
> 2015-07-03 13:29:20,671 INFO  [RegionOpenAndInitThread-tsdb-meta-1] 
> regionserver.HRegion: creating HRegion tsdb-meta HTD == 'tsdb-meta', {NAME => 
> 'name', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', 
> KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 
> 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE => 'true', 
> BLOCKSIZE => '65536', REPLICATION_SCOPE => '1'} RootDir = 
> hdfs://node1.vmcluster:9000/hbase/.tmp Table name == tsdb-meta
> 2015-07-03 13:29:20,696 INFO  [RegionOpenAndInitThread-tsdb-meta-1] 
> regionserver.HRegion: Closed 
> tsdb-meta,,1435897760624.0738e3fac8ffe40d656dc91588a47aac.
> 2015-07-03 13:29:20,703 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
> hbase.MetaTableAccessor: Added 1
> 2015-07-03 13:29:20,704 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
> master.AssignmentManager: Assigning 1 region(s) to 
> node1.vmcluster,16040,1435897652505
> 2015-07-03 13:29:20,717 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
> master.RegionStates: Transition {0738e3fac8ffe40d656dc91588a47aac 
> state=OFFLINE, ts=1435897760704, server=null} to 
> {0738e3fac8ffe40d656dc91588a47aac state=PENDING_OPEN, ts=1435897760717, 
> server=node1.vmcluster,16040,1435897652505}
> 2015-07-03 13:29:20,729 WARN  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
> zookeeper.ZKTableStateManager: Moving table tsdb-meta state from ENABLING to 
> ENABLED
> 2015-07-03 13:29:20,734 INFO  [AM.ZK.Worker-pool2-t33] master.RegionStates: 
> Transition {0738e3fac8ffe40d656dc91588a47aac state=PENDING_OPEN, 
> ts=1435897760717, server=node1.vmcluster,16040,1435897652505} to 
> {0738e3fac8ffe40d656dc91588a47aac state=OPENING, ts=1435897760734, 
> server=node1.vmcluster,16040,1435897652505}
> 2015-07-03 13:29:20,748 INFO  [MASTER_TABLE_OPERATIONS-node1:16020-0] 
> handler.CreateTableHandler: failed. null
> 2015-07-03 13:29:20,772 INFO  [AM.ZK.Worker-pool2-t35] master.RegionStates: 
> Transition {0738e3fac8ffe40d656dc91588a47aac state=OPENING, ts=1435897760734, 
> server=node1.vmcluster,16040,1435897652505} to 
> {0738e3fac8ffe40d656dc91588a47aac state=OPEN, ts=1435897760772, 
> server=node1.vmcluster,16040,1435897652505}
> 2015-07-03 13:29:20,774 INFO  [AM.ZK.Worker-pool2-t35] master.RegionStates: 
> Onlined 0738e3fac8ffe40d656dc91588a47aac on 
> node1.vmcluster,16040,1435897652505
> 2015-07-03 16:43:27,970 ERROR 
> [B.defaultRpcServer.handler=15,queue=0,port=16020] master.MasterRpcServices: 
> Region server node1.vmcluster,16040,1435897652505 reported a fatal error:
> ABORTING region server node1.vmcluster,16040,1435897652505: Replay of WAL 
> required. Forcing server shutdown
> Cause:
> org.apache.hadoop.hbase.DroppedSnapshotException: region: 
> tsdb,,1435897759785.2d49cd81fb6513f51af58bd0394c4e0d.
>       at 
> org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2001)
>       at 
> org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1772)
>       at 
> org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:1704)
>       at 
> org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:445)
>       at 
> org.apache.hadoop.hbase.regionserver.MemStoreFlusher.flushRegion(MemStoreFlusher.java:407)
>       at 
> org.apache.hadoop.hbase.regionserver.MemStoreFlusher.access$800(MemStoreFlusher.java:69)
>       at 
> org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:225)
>       at java.lang.Thread.run(Thread.java:745)
> Caused by: java.lang.ArrayIndexOutOfBoundsException: -32743
>       at 
> org.apache.hadoop.hbase.CellComparator.getMinimumMidpointArray(CellComparator.java:478)
>       at 
> org.apache.hadoop.hbase.CellComparator.getMidpoint(CellComparator.java:448)
>       at 
> org.apache.hadoop.hbase.io.hfile.HFileWriterV2.finishBlock(HFileWriterV2.java:165)
>       at 
> org.apache.hadoop.hbase.io.hfile.HFileWriterV2.checkBlockBoundary(HFileWriterV2.java:146)
>       at 
> org.apache.hadoop.hbase.io.hfile.HFileWriterV2.append(HFileWriterV2.java:263)
>       at 
> org.apache.hadoop.hbase.io.hfile.HFileWriterV3.append(HFileWriterV3.java:87)
>       at 
> org.apache.hadoop.hbase.regionserver.StoreFile$Writer.append(StoreFile.java:932)
>       at 
> org.apache.hadoop.hbase.regionserver.StoreFlusher.performFlush(StoreFlusher.java:121)
>       at 
> org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher.flushSnapshot(DefaultStoreFlusher.java:71)
>       at 
> org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:879)
>       at 
> org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:2128)
>       at 
> org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:1955)
>       ... 7 more
> 2015-07-03 16:43:32,595 INFO  [main-EventThread] 
> zookeeper.RegionServerTracker: RegionServer ephemeral node deleted, 
> processing expiration [node1.vmcluster,16040,1435897652505]
> 2015-07-03 16:43:32,611 INFO  [MASTER_META_SERVER_OPERATIONS-node1:16020-0] 
> handler.MetaServerShutdownHandler: Splitting hbase:meta logs for 
> node1.vmcluster,16040,1435897652505
> 2015-07-03 16:43:32,627 INFO  [MASTER_META_SERVER_OPERATIONS-node1:16020-0] 
> master.SplitLogManager: dead splitlog workers 
> [node1.vmcluster,16040,1435897652505]
> 2015-07-03 16:43:32,630 INFO  [MASTER_META_SERVER_OPERATIONS-node1:16020-0] 
> master.SplitLogManager: started splitting 1 logs in 
> [hdfs://node1.vmcluster:9000/hbase/WALs/node1.vmcluster,16040,1435897652505-splitting]
>  for [node1.vmcluster,16040,1435897652505]



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)