hbase git commit: HBASE-16954 Unify HTable#checkAndDelete with AP (ChiaPing Tsai)

2016-10-30 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master ba6d95232 -> 6ce05d44e


HBASE-16954 Unify HTable#checkAndDelete with AP (ChiaPing Tsai)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6ce05d44
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6ce05d44
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6ce05d44

Branch: refs/heads/master
Commit: 6ce05d44e5f6c78df1eae79d682b49ae7d410a89
Parents: ba6d952
Author: daoye.ch 
Authored: Mon Oct 31 09:15:29 2016 +0800
Committer: daoye.ch 
Committed: Mon Oct 31 09:15:29 2016 +0800

--
 .../org/apache/hadoop/hbase/client/HTable.java  | 24 
 .../org/apache/hadoop/hbase/SplitLogTask.java   |  2 +-
 2 files changed, 5 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6ce05d44/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 8d024dd..b2c012d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -529,8 +529,7 @@ public class HTable implements Table {
 return ResponseConverter.getResult(request, response, 
getRpcControllerCellScanner());
   }
 };
-List rows = new ArrayList();
-rows.add(delete);
+List rows = Collections.singletonList(delete);
 AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, rows,
 null, null, callable, writeRpcTimeout);
 ars.waitUntilDone();
@@ -762,21 +761,8 @@ public class HTable implements Table {
*/
   @Override
   public boolean checkAndDelete(final byte [] row, final byte [] family, final 
byte [] qualifier,
-  final byte [] value, final Delete delete)
-  throws IOException {
-ClientServiceCallable callable = new 
ClientServiceCallable(this.connection, getName(), row,
-this.rpcControllerFactory.newController()) {
-  @Override
-  protected Boolean rpcCall() throws Exception {
-MutateRequest request = RequestConverter.buildMutateRequest(
-  getLocation().getRegionInfo().getRegionName(), row, family, 
qualifier,
-  new BinaryComparator(value), CompareType.EQUAL, delete);
-MutateResponse response = doMutate(request);
-return Boolean.valueOf(response.getProcessed());
-  }
-};
-return rpcCallerFactory. newCaller(this.writeRpcTimeout).
-callWithRetries(callable, this.operationTimeout);
+  final byte [] value, final Delete delete) throws IOException {
+return checkAndDelete(row, family, qualifier, CompareOp.EQUAL, value, 
delete);
   }
 
   /**
@@ -801,9 +787,7 @@ public class HTable implements Table {
 return ResponseConverter.getResult(request, response, 
getRpcControllerCellScanner());
   }
 };
-List rows = new ArrayList();
-rows.add(delete);
-
+List rows = Collections.singletonList(delete);
 Object[] results = new Object[1];
 AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, rows,
 null, results, callable, -1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/6ce05d44/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
index 986e5bf..66493e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
@@ -153,7 +153,7 @@ public class SplitLogTask {
   /**
* @param data Serialized date to parse.
* @return An SplitLogTaskState instance made of the passed data
-   * @throws DeserializationException 
+   * @throws DeserializationException
* @see #toByteArray()
*/
   public static SplitLogTask parseFrom(final byte [] data) throws 
DeserializationException {



[2/3] hbase git commit: HBASE-16653 Backport HBASE-11393 to branches which support namespace

2016-10-17 Thread chenheng
http://git-wip-us.apache.org/repos/asf/hbase/blob/66941910/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 09479c4..955995f 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -5032,6 +5032,719 @@ public final class ZooKeeperProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.Table)
   }
 
+  public interface TableCFOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional .hbase.pb.TableName table_name = 1;
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+boolean hasTableName();
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableName();
+/**
+ * optional .hbase.pb.TableName table_name = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTableNameOrBuilder();
+
+// repeated bytes families = 2;
+/**
+ * repeated bytes families = 2;
+ */
+java.util.List getFamiliesList();
+/**
+ * repeated bytes families = 2;
+ */
+int getFamiliesCount();
+/**
+ * repeated bytes families = 2;
+ */
+com.google.protobuf.ByteString getFamilies(int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.TableCF}
+   */
+  public static final class TableCF extends
+  com.google.protobuf.GeneratedMessage
+  implements TableCFOrBuilder {
+// Use TableCF.newBuilder() to construct.
+private TableCF(com.google.protobuf.GeneratedMessage.Builder builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private TableCF(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final TableCF defaultInstance;
+public static TableCF getDefaultInstance() {
+  return defaultInstance;
+}
+
+public TableCF getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private TableCF(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder 
subBuilder = null;
+  if (((bitField0_ & 0x0001) == 0x0001)) {
+subBuilder = tableName_.toBuilder();
+  }
+  tableName_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER,
 extensionRegistry);
+  if (subBuilder != null) {
+subBuilder.mergeFrom(tableName_);
+tableName_ = subBuilder.buildPartial();
+  }
+  bitField0_ |= 0x0001;
+  break;
+}
+case 18: {
+  if (!((mutable_bitField0_ & 0x0002) == 0x0002)) {
+families_ = new 
java.util.ArrayList();
+mutable_bitField0_ |= 0x0002;
+  }
+  families_.add(input.readBytes());
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+if (((mutable_bitField0_ & 0x0002) == 0x0002)) {
+  families_ = java.util.Collections.unmodifiableList(families_);
+}
+this.unknownFields = 

[3/3] hbase git commit: HBASE-16653 Backport HBASE-11393 to branches which support namespace

2016-10-17 Thread chenheng
HBASE-16653 Backport HBASE-11393 to branches which support namespace

Signed-off-by: chenheng <chenh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/66941910
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/66941910
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/66941910

Branch: refs/heads/branch-1
Commit: 66941910bd07462fe496c5bbb591f4071f77b8fb
Parents: 6df7554
Author: Guanghao Zhang <zghao...@gmail.com>
Authored: Mon Sep 26 19:33:43 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Tue Oct 18 09:12:47 2016 +0800

--
 .../client/replication/ReplicationAdmin.java|   84 +-
 .../replication/ReplicationPeerConfig.java  |   16 +-
 .../replication/ReplicationPeerZKImpl.java  |   80 +-
 .../hbase/replication/ReplicationPeers.java |   15 +-
 .../replication/ReplicationPeersZKImpl.java |   60 +-
 .../replication/ReplicationSerDeHelper.java |  189 +++
 .../replication/ReplicationStateZKBase.java |   17 +
 .../protobuf/generated/ZooKeeperProtos.java | 1155 +-
 .../src/main/protobuf/ZooKeeper.proto   |8 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |8 +
 .../replication/master/TableCFsUpdater.java |  120 ++
 .../hbase/client/TestReplicaWithCluster.java|5 +-
 .../replication/TestReplicationAdmin.java   |  193 +--
 .../cleaner/TestReplicationHFileCleaner.java|2 +-
 .../replication/TestMasterReplication.java  |9 +-
 .../replication/TestMultiSlaveReplication.java  |8 +-
 .../replication/TestPerTableCFReplication.java  |  153 ++-
 .../hbase/replication/TestReplicationBase.java  |4 +-
 .../replication/TestReplicationSmallTests.java  |4 +-
 .../replication/TestReplicationStateBasic.java  |   20 +-
 .../replication/TestReplicationSyncUpTool.java  |4 +-
 .../TestReplicationTrackerZKImpl.java   |   10 +-
 .../replication/TestReplicationWithTags.java|4 +-
 .../replication/master/TestTableCFsUpdater.java |  164 +++
 .../TestReplicationSourceManager.java   |2 +-
 ...sibilityLabelReplicationWithExpAsString.java |5 +-
 .../TestVisibilityLabelsReplication.java|5 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java |5 +-
 .../src/main/ruby/hbase/replication_admin.rb|   49 +-
 .../src/main/ruby/shell/commands/add_peer.rb|4 +-
 .../ruby/shell/commands/append_peer_tableCFs.rb |2 +-
 .../src/main/ruby/shell/commands/list_peers.rb  |6 +-
 .../ruby/shell/commands/remove_peer_tableCFs.rb |4 +-
 .../ruby/shell/commands/set_peer_tableCFs.rb|4 +-
 .../hbase/client/TestReplicationShell.java  |2 +-
 .../test/ruby/hbase/replication_admin_test.rb   |  118 +-
 36 files changed, 2167 insertions(+), 371 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/66941910/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 1304396..9fca28b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -55,6 +55,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
+import org.apache.hadoop.hbase.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 
@@ -184,8 +185,8 @@ public class ReplicationAdmin implements Closeable {
   @Deprecated
   public void addPeer(String id, String clusterKey, String tableCFs)
 throws ReplicationException {
-this.replicationPeers.addPeer(id,
-  new ReplicationPeerConfig().setClusterKey(clusterKey), tableCFs);
+this.addPeer(id, new ReplicationPeerConfig().setClusterKey(clusterKey),
+  parseTableCFsFromConfig(tableCFs));
   }
 
   /**
@@ -199,7 +200,19 @@ public class ReplicationAdmin implements Closeable {
*/
   public void addPeer(String id, ReplicationPeerConfig peerConfig,
   Map<TableName, ? extends Collection> tableCfs) throws 
ReplicationException {
-this.replicationPeers.addPeer(id, peerConfig, getTableCfsStr(tableCfs));
+if (tableCfs != null) {
+  peerConfig.setTableCFsMap(tableCfs);
+}
+this.replicationPeers.addPeer(id, peerConfig);

[1/3] hbase git commit: HBASE-16653 Backport HBASE-11393 to branches which support namespace

2016-10-17 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 6df7554d2 -> 66941910b


http://git-wip-us.apache.org/repos/asf/hbase/blob/66941910/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
index 2c9fc0f..dd15e4c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
@@ -22,10 +22,14 @@ package org.apache.hadoop.hbase.replication;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -37,6 +41,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -183,13 +188,13 @@ public class TestPerTableCFReplication {
 Map tabCFsMap = null;
 
 // 1. null or empty string, result should be null
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig(null);
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig(null);
 assertEquals(null, tabCFsMap);
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("");
 assertEquals(null, tabCFsMap);
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("   ");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("   ");
 assertEquals(null, tabCFsMap);
 
 TableName tab1 = TableName.valueOf("tab1");
@@ -197,20 +202,20 @@ public class TestPerTableCFReplication {
 TableName tab3 = TableName.valueOf("tab3");
 
 // 2. single table: "tab1" / "tab2:cf1" / "tab3:cf1,cf3"
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab1");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab1");
 assertEquals(1, tabCFsMap.size()); // only one table
 assertTrue(tabCFsMap.containsKey(tab1));   // its table name is "tab1"
 assertFalse(tabCFsMap.containsKey(tab2));  // not other table
 assertEquals(null, tabCFsMap.get(tab1));   // null cf-list,
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab2:cf1");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab2:cf1");
 assertEquals(1, tabCFsMap.size()); // only one table
 assertTrue(tabCFsMap.containsKey(tab2));   // its table name is "tab2"
 assertFalse(tabCFsMap.containsKey(tab1));  // not other table
 assertEquals(1, tabCFsMap.get(tab2).size());   // cf-list contains only 1 
cf
 assertEquals("cf1", tabCFsMap.get(tab2).get(0));// the only cf is "cf1"
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab3 : cf1 , cf3");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab3 : cf1 , 
cf3");
 assertEquals(1, tabCFsMap.size()); // only one table
 assertTrue(tabCFsMap.containsKey(tab3));   // its table name is "tab2"
 assertFalse(tabCFsMap.containsKey(tab1));  // not other table
@@ -219,7 +224,7 @@ public class TestPerTableCFReplication {
 assertTrue(tabCFsMap.get(tab3).contains("cf3"));// contains "cf3"
 
 // 3. multiple tables: "tab1 ; tab2:cf1 ; tab3:cf1,cf3"
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab1 ; tab2:cf1 ; 
tab3:cf1,cf3");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab1 ; 
tab2:cf1 ; tab3:cf1,cf3");
 // 3.1 contains 3 tables : "tab1", "tab2" and "tab3"
 assertEquals(3, tabCFsMap.size());
 assertTrue(tabCFsMap.containsKey(tab1));
@@ -237,7 +242,7 @@ public class TestPerTableCFReplication {
 
 // 4. contiguous or additional ";"(table delimiter) or ","(cf delimiter) 
can be tolerated
 // still use the example of multiple tables: "tab1 ; tab2:cf1 ; 
tab3:cf1,cf3"
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig(
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig(
   "tab1 ; ; tab2:cf1 ; tab3:cf1,,cf3 ;");
 // 4.1 contains 3 tables : "tab1", "tab2" and "tab3"
 assertEquals(3, tabCFsMap.size());
@@ -256,7 +261,7 @@ public class TestPerTableCFReplication {
 
 

hbase git commit: HBASE-16807, RegionServer will fail to report new active HMaster until HMaster/RegionServer failover.

2016-10-13 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 211559e6d -> bcc74e5ee


HBASE-16807, RegionServer will fail to report new active HMaster until 
HMaster/RegionServer failover.

Signed-off-by: chenheng <chenh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bcc74e5e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bcc74e5e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bcc74e5e

Branch: refs/heads/branch-1.2
Commit: bcc74e5ee5fa113d1815077f276ed61cb704b99b
Parents: 211559e
Author: Pankaj Kumar <pankaj...@huawei.com>
Authored: Thu Oct 13 22:26:42 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Oct 13 22:46:22 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegionServer.java  | 18 +++---
 .../TestRegionServerReportForDuty.java|  4 ++--
 2 files changed, 17 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bcc74e5e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index f3824ea..386cd24 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1159,7 +1159,7 @@ public class HRegionServer extends HasThread implements
   }
   // Couldn't connect to the master, get location from zk and reconnect
   // Method blocks until new master is found or we are stopped
-  createRegionServerStatusStub();
+  createRegionServerStatusStub(true);
 }
   }
 
@@ -2185,12 +2185,24 @@ public class HRegionServer extends HasThread implements
*/
   @VisibleForTesting
   protected synchronized ServerName createRegionServerStatusStub() {
+// Create RS stub without refreshing the master node from ZK, use cached 
data
+return createRegionServerStatusStub(false);
+  }
+
+  /**
+   * Get the current master from ZooKeeper and open the RPC connection to it. 
To get a fresh
+   * connection, the current rssStub must be null. Method will block until a 
master is available.
+   * You can break from this block by requesting the server stop.
+   * @param refresh If true then master address will be read from ZK, 
otherwise use cached data
+   * @return master + port, or null if server has been stopped
+   */
+  @VisibleForTesting
+  protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
 if (rssStub != null) {
   return masterAddressTracker.getMasterAddress();
 }
 ServerName sn = null;
 long previousLogTime = 0;
-boolean refresh = false; // for the first time, use cached data
 RegionServerStatusService.BlockingInterface intf = null;
 boolean interrupted = false;
 try {
@@ -2265,7 +2277,7 @@ public class HRegionServer extends HasThread implements
* @throws IOException
*/
   private RegionServerStartupResponse reportForDuty() throws IOException {
-ServerName masterServerName = createRegionServerStatusStub();
+ServerName masterServerName = createRegionServerStatusStub(true);
 if (masterServerName == null) return null;
 RegionServerStartupResponse result = null;
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/bcc74e5e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
index 5778c6c..77cb5b7 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
@@ -166,8 +166,8 @@ public class TestRegionServerReportForDuty {
 }
 
 @Override
-protected synchronized ServerName createRegionServerStatusStub() {
-  sn = super.createRegionServerStatusStub();
+protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
+  sn = super.createRegionServerStatusStub(refresh);
   rpcStubCreatedFlag = true;
 
   // Wait for master switch over. Only do this for the second region 
server.



hbase git commit: HBASE-16807, RegionServer will fail to report new active HMaster until HMaster/RegionServer failover.

2016-10-13 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/0.98 95a70c924 -> 09b89eedb


HBASE-16807, RegionServer will fail to report new active HMaster until 
HMaster/RegionServer failover.

Signed-off-by: chenheng <chenh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/09b89eed
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/09b89eed
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/09b89eed

Branch: refs/heads/0.98
Commit: 09b89eedb585517d734bf3af3875e0e1ff5359e0
Parents: 95a70c9
Author: Pankaj Kumar <pankaj...@huawei.com>
Authored: Thu Oct 13 20:24:55 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Oct 13 22:20:43 2016 +0800

--
 .../hbase/regionserver/HRegionServer.java   | 21 +++-
 .../TestRegionServerReportForDuty.java  |  4 ++--
 2 files changed, 18 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/09b89eed/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 1d8116f..37cbdc4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1132,7 +1132,7 @@ public class HRegionServer implements 
ClientProtos.ClientService.BlockingInterfa
   }
   // Couldn't connect to the master, get location from zk and reconnect
   // Method blocks until new master is found or we are stopped
-  createRegionServerStatusStub();
+  createRegionServerStatusStub(true);
 }
   }
 
@@ -2108,15 +2108,26 @@ public class HRegionServer implements 
ClientProtos.ClientService.BlockingInterfa
* @return master + port, or null if server has been stopped
*/
   @VisibleForTesting
-  protected synchronized ServerName
-  createRegionServerStatusStub() {
+  protected synchronized ServerName createRegionServerStatusStub() {
+// Create RS stub without refreshing the master node from ZK, use cached 
data
+return createRegionServerStatusStub(false);
+  }
+
+  /**
+   * Get the current master from ZooKeeper and open the RPC connection to it. 
To get a fresh
+   * connection, the current rssStub must be null. Method will block until a 
master is available.
+   * You can break from this block by requesting the server stop.
+   * @param refresh If true then master address will be read from ZK, 
otherwise use cached data
+   * @return master + port, or null if server has been stopped
+   */
+  @VisibleForTesting
+  protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
 if (rssStub != null) {
   return masterAddressTracker.getMasterAddress();
 }
 ServerName sn = null;
 long previousLogTime = 0;
 RegionServerStatusService.BlockingInterface master = null;
-boolean refresh = false; // for the first time, use cached data
 RegionServerStatusService.BlockingInterface intf = null;
 while (keepLooping() && master == null) {
   sn = this.masterAddressTracker.getMasterAddress(refresh);
@@ -2179,7 +2190,7 @@ public class HRegionServer implements 
ClientProtos.ClientService.BlockingInterfa
* @throws IOException
*/
   private RegionServerStartupResponse reportForDuty() throws IOException {
-ServerName masterServerName = createRegionServerStatusStub();
+ServerName masterServerName = createRegionServerStatusStub(true);
 if (masterServerName == null) return null;
 RegionServerStartupResponse result = null;
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/09b89eed/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
index 80acb3f..3a5879f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
@@ -165,8 +165,8 @@ public class TestRegionServerReportForDuty {
 }
 
 @Override
-protected synchronized ServerName createRegionServerStatusStub() {
-  sn = super.createRegionServerStatusStub();
+protected synchronized ServerName createRegionServerStatusStub(

hbase git commit: HBASE-16807, RegionServer will fail to report new active HMaster until HMaster/RegionServer failover.

2016-10-13 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 edac95201 -> ba6e7ddd3


HBASE-16807, RegionServer will fail to report new active HMaster until 
HMaster/RegionServer failover.

Signed-off-by: chenheng <chenh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ba6e7ddd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ba6e7ddd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ba6e7ddd

Branch: refs/heads/branch-1.1
Commit: ba6e7ddd31b3fe6b965cb04a8033185382459b7c
Parents: edac952
Author: Pankaj Kumar <pankaj...@huawei.com>
Authored: Thu Oct 13 22:21:03 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Oct 13 22:42:25 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegionServer.java  | 18 +++---
 .../TestRegionServerReportForDuty.java|  4 ++--
 2 files changed, 17 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ba6e7ddd/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 73a7c4e..e642679 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1149,7 +1149,7 @@ public class HRegionServer extends HasThread implements
   }
   // Couldn't connect to the master, get location from zk and reconnect
   // Method blocks until new master is found or we are stopped
-  createRegionServerStatusStub();
+  createRegionServerStatusStub(true);
 }
   }
 
@@ -2172,12 +2172,24 @@ public class HRegionServer extends HasThread implements
*/
   @VisibleForTesting
   protected synchronized ServerName createRegionServerStatusStub() {
+// Create RS stub without refreshing the master node from ZK, use cached 
data
+return createRegionServerStatusStub(false);
+  }
+
+  /**
+   * Get the current master from ZooKeeper and open the RPC connection to it. 
To get a fresh
+   * connection, the current rssStub must be null. Method will block until a 
master is available.
+   * You can break from this block by requesting the server stop.
+   * @param refresh If true then master address will be read from ZK, 
otherwise use cached data
+   * @return master + port, or null if server has been stopped
+   */
+  @VisibleForTesting
+  protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
 if (rssStub != null) {
   return masterAddressTracker.getMasterAddress();
 }
 ServerName sn = null;
 long previousLogTime = 0;
-boolean refresh = false; // for the first time, use cached data
 RegionServerStatusService.BlockingInterface intf = null;
 boolean interrupted = false;
 try {
@@ -2252,7 +2264,7 @@ public class HRegionServer extends HasThread implements
* @throws IOException
*/
   private RegionServerStartupResponse reportForDuty() throws IOException {
-ServerName masterServerName = createRegionServerStatusStub();
+ServerName masterServerName = createRegionServerStatusStub(true);
 if (masterServerName == null) return null;
 RegionServerStartupResponse result = null;
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ba6e7ddd/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
index 5778c6c..77cb5b7 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
@@ -166,8 +166,8 @@ public class TestRegionServerReportForDuty {
 }
 
 @Override
-protected synchronized ServerName createRegionServerStatusStub() {
-  sn = super.createRegionServerStatusStub();
+protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
+  sn = super.createRegionServerStatusStub(refresh);
   rpcStubCreatedFlag = true;
 
   // Wait for master switch over. Only do this for the second region 
server.



hbase git commit: HBASE-16807, RegionServer will fail to report new active HMaster until HMaster/RegionServer failover.

2016-10-13 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 8f9fadf02 -> 27398ea52


HBASE-16807, RegionServer will fail to report new active HMaster until 
HMaster/RegionServer failover.

Signed-off-by: chenheng <chenh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/27398ea5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/27398ea5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/27398ea5

Branch: refs/heads/branch-1
Commit: 27398ea525be88f9d8089071e0d7f212f33c2ca8
Parents: 8f9fadf
Author: Pankaj Kumar <pankaj...@huawei.com>
Authored: Thu Oct 13 20:19:07 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Oct 13 22:17:28 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegionServer.java  | 18 +++---
 .../TestRegionServerReportForDuty.java|  4 ++--
 2 files changed, 17 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/27398ea5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 03dcd4e..f3eb27f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1196,7 +1196,7 @@ public class HRegionServer extends HasThread implements
   }
   // Couldn't connect to the master, get location from zk and reconnect
   // Method blocks until new master is found or we are stopped
-  createRegionServerStatusStub();
+  createRegionServerStatusStub(true);
 }
   }
 
@@ -2235,12 +2235,24 @@ public class HRegionServer extends HasThread implements
*/
   @VisibleForTesting
   protected synchronized ServerName createRegionServerStatusStub() {
+// Create RS stub without refreshing the master node from ZK, use cached 
data
+return createRegionServerStatusStub(false);
+  }
+
+  /**
+   * Get the current master from ZooKeeper and open the RPC connection to it. 
To get a fresh
+   * connection, the current rssStub must be null. Method will block until a 
master is available.
+   * You can break from this block by requesting the server stop.
+   * @param refresh If true then master address will be read from ZK, 
otherwise use cached data
+   * @return master + port, or null if server has been stopped
+   */
+  @VisibleForTesting
+  protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
 if (rssStub != null) {
   return masterAddressTracker.getMasterAddress();
 }
 ServerName sn = null;
 long previousLogTime = 0;
-boolean refresh = false; // for the first time, use cached data
 RegionServerStatusService.BlockingInterface intf = null;
 boolean interrupted = false;
 try {
@@ -2315,7 +2327,7 @@ public class HRegionServer extends HasThread implements
* @throws IOException
*/
   private RegionServerStartupResponse reportForDuty() throws IOException {
-ServerName masterServerName = createRegionServerStatusStub();
+ServerName masterServerName = createRegionServerStatusStub(true);
 if (masterServerName == null) return null;
 RegionServerStartupResponse result = null;
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/27398ea5/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
index 5778c6c..77cb5b7 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
@@ -166,8 +166,8 @@ public class TestRegionServerReportForDuty {
 }
 
 @Override
-protected synchronized ServerName createRegionServerStatusStub() {
-  sn = super.createRegionServerStatusStub();
+protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
+  sn = super.createRegionServerStatusStub(refresh);
   rpcStubCreatedFlag = true;
 
   // Wait for master switch over. Only do this for the second region 
server.



hbase git commit: HBASE-16807 RegionServer will fail to report new active Hmaster until HMaster/RegionServer failover.

2016-10-13 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 d316bf7c4 -> e3ded33a5


HBASE-16807 RegionServer will fail to report new active Hmaster until 
HMaster/RegionServer failover.

Signed-off-by: chenheng <chenh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e3ded33a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e3ded33a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e3ded33a

Branch: refs/heads/branch-1.3
Commit: e3ded33a52574d532780594e85be8fddb6259671
Parents: d316bf7
Author: Pankaj Kumar <pankaj...@huawei.com>
Authored: Thu Oct 13 20:31:50 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Oct 13 22:14:58 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegionServer.java  | 18 +++---
 .../TestRegionServerReportForDuty.java|  4 ++--
 2 files changed, 17 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e3ded33a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 9897b29..49dde01 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1185,7 +1185,7 @@ public class HRegionServer extends HasThread implements
   }
   // Couldn't connect to the master, get location from zk and reconnect
   // Method blocks until new master is found or we are stopped
-  createRegionServerStatusStub();
+  createRegionServerStatusStub(true);
 }
   }
 
@@ -2217,12 +2217,24 @@ public class HRegionServer extends HasThread implements
*/
   @VisibleForTesting
   protected synchronized ServerName createRegionServerStatusStub() {
+// Create RS stub without refreshing the master node from ZK, use cached 
data
+return createRegionServerStatusStub(false);
+  }
+
+  /**
+   * Get the current master from ZooKeeper and open the RPC connection to it. 
To get a fresh
+   * connection, the current rssStub must be null. Method will block until a 
master is available.
+   * You can break from this block by requesting the server stop.
+   * @param refresh If true then master address will be read from ZK, 
otherwise use cached data
+   * @return master + port, or null if server has been stopped
+   */
+  @VisibleForTesting
+  protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
 if (rssStub != null) {
   return masterAddressTracker.getMasterAddress();
 }
 ServerName sn = null;
 long previousLogTime = 0;
-boolean refresh = false; // for the first time, use cached data
 RegionServerStatusService.BlockingInterface intf = null;
 boolean interrupted = false;
 try {
@@ -2297,7 +2309,7 @@ public class HRegionServer extends HasThread implements
* @throws IOException
*/
   private RegionServerStartupResponse reportForDuty() throws IOException {
-ServerName masterServerName = createRegionServerStatusStub();
+ServerName masterServerName = createRegionServerStatusStub(true);
 if (masterServerName == null) return null;
 RegionServerStartupResponse result = null;
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e3ded33a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
index 5778c6c..77cb5b7 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
@@ -166,8 +166,8 @@ public class TestRegionServerReportForDuty {
 }
 
 @Override
-protected synchronized ServerName createRegionServerStatusStub() {
-  sn = super.createRegionServerStatusStub();
+protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
+  sn = super.createRegionServerStatusStub(refresh);
   rpcStubCreatedFlag = true;
 
   // Wait for master switch over. Only do this for the second region 
server.



hbase git commit: HBASE-16664 Timeout logic in AsyncProcess is broken

2016-10-13 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 3737c4696 -> d316bf7c4


HBASE-16664 Timeout logic in AsyncProcess is broken

Signed-off-by: chenheng <chenh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d316bf7c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d316bf7c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d316bf7c

Branch: refs/heads/branch-1.3
Commit: d316bf7c4b3fd4de7a108c4e025c8ddb8dc0a0b8
Parents: 3737c46
Author: Phil Yang <ud1...@gmail.com>
Authored: Tue Oct 11 17:12:54 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Oct 13 19:52:02 2016 +0800

--
 .../hadoop/hbase/client/AsyncProcess.java   |  75 +---
 .../hbase/client/BufferedMutatorImpl.java   |  21 ++-
 .../hadoop/hbase/client/ConnectionManager.java  |   3 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |  22 ++-
 .../hadoop/hbase/client/HTableMultiplexer.java  |   5 +-
 .../hbase/client/MultiServerCallable.java   |  15 +-
 .../hbase/client/RetryingTimeTracker.java   |   3 +-
 .../hadoop/hbase/client/TestAsyncProcess.java   |  22 ++-
 .../hbase/client/HConnectionTestingUtility.java |   5 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java | 172 +--
 10 files changed, 276 insertions(+), 67 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d316bf7c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index cdcb1b2..32de1e3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -232,7 +232,8 @@ class AsyncProcess {
   protected final long pause;
   protected int numTries;
   protected int serverTrackerTimeout;
-  protected int timeout;
+  protected int rpcTimeout;
+  protected int operationTimeout;
   protected long primaryCallTimeoutMicroseconds;
   // End configuration settings.
 
@@ -275,7 +276,8 @@ class AsyncProcess {
   }
 
   public AsyncProcess(ClusterConnection hc, Configuration conf, 
ExecutorService pool,
-  RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, 
RpcControllerFactory rpcFactory) {
+  RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, 
RpcControllerFactory rpcFactory,
+  int rpcTimeout) {
 if (hc == null) {
   throw new IllegalArgumentException("HConnection cannot be null.");
 }
@@ -290,8 +292,9 @@ class AsyncProcess {
 HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
 this.numTries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-this.timeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
+this.rpcTimeout = rpcTimeout;
+this.operationTimeout = 
conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
 this.primaryCallTimeoutMicroseconds = 
conf.getInt(PRIMARY_CALL_TIMEOUT_KEY, 10000);
 
 this.maxTotalConcurrentTasks = 
conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
@@ -336,6 +339,14 @@ class AsyncProcess {
   DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS);
   }
 
+  public void setRpcTimeout(int rpcTimeout) {
+this.rpcTimeout = rpcTimeout;
+  }
+
+  public void setOperationTimeout(int operationTimeout) {
+this.operationTimeout = operationTimeout;
+  }
+
   /**
* @return pool if non null, otherwise returns this.pool if non null, 
otherwise throws
* RuntimeException
@@ -561,12 +572,12 @@ class AsyncProcess {
*/
   public  AsyncRequestFuture submitAll(TableName tableName,
   List rows, Batch.Callback callback, Object[] 
results) {
-return submitAll(null, tableName, rows, callback, results, null, timeout);
+return submitAll(null, tableName, rows, callback, results, null, 
operationTimeout, rpcTimeout);
   }
 
   public  AsyncRequestFuture submitAll(ExecutorService pool, 
TableName tableName,
   List rows, Batch.Callback callback, Object[] 
results) {
-return submitAll(pool, tableName, rows, callback, results, null, timeout);
+return submitAll(pool, tableName, rows, callback, results, null, 
operationTimeout, rpcTimeout);
   }
   /**
* Submit immediately the list of rows, whatever the server status. Kept for 
backward
@@ -580,7 +591,7 @@ class AsyncProcess {
*/
   public  AsyncRequestFuture submitAll(ExecutorService pool, 
TableName tableName,
   List rows, Batch.Callb

hbase git commit: HBASE-16807, RegionServer will fail to report new active Hmaster until HMaster/RegionServer failover.

2016-10-13 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 193e0d602 -> 90d83d5b3


HBASE-16807, RegionServer will fail to report new active Hmaster until 
HMaster/RegionServer failover.

Signed-off-by: chenheng <chenh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/90d83d5b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/90d83d5b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/90d83d5b

Branch: refs/heads/master
Commit: 90d83d5b38beb4ce35db0d7d75eb6fcef477fbf7
Parents: 193e0d6
Author: Pankaj Kumar <pankaj...@huawei.com>
Authored: Thu Oct 13 10:57:29 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Oct 13 18:00:57 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegionServer.java  | 18 +++---
 .../TestRegionServerReportForDuty.java|  4 ++--
 2 files changed, 17 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/90d83d5b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 159d067..8bea130 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1213,7 +1213,7 @@ public class HRegionServer extends HasThread implements
   }
   // Couldn't connect to the master, get location from zk and reconnect
   // Method blocks until new master is found or we are stopped
-  createRegionServerStatusStub();
+  createRegionServerStatusStub(true);
 }
   }
 
@@ -2284,12 +2284,24 @@ public class HRegionServer extends HasThread implements
*/
   @VisibleForTesting
   protected synchronized ServerName createRegionServerStatusStub() {
+// Create RS stub without refreshing the master node from ZK, use cached 
data
+return createRegionServerStatusStub(false);
+  }
+
+  /**
+   * Get the current master from ZooKeeper and open the RPC connection to it. 
To get a fresh
+   * connection, the current rssStub must be null. Method will block until a 
master is available.
+   * You can break from this block by requesting the server stop.
+   * @param refresh If true then master address will be read from ZK, 
otherwise use cached data
+   * @return master + port, or null if server has been stopped
+   */
+  @VisibleForTesting
+  protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
 if (rssStub != null) {
   return masterAddressTracker.getMasterAddress();
 }
 ServerName sn = null;
 long previousLogTime = 0;
-boolean refresh = false; // for the first time, use cached data
 RegionServerStatusService.BlockingInterface intf = null;
 boolean interrupted = false;
 try {
@@ -2364,7 +2376,7 @@ public class HRegionServer extends HasThread implements
* @throws IOException
*/
   private RegionServerStartupResponse reportForDuty() throws IOException {
-ServerName masterServerName = createRegionServerStatusStub();
+ServerName masterServerName = createRegionServerStatusStub(true);
 if (masterServerName == null) return null;
 RegionServerStartupResponse result = null;
 try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/90d83d5b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
index 19d25a8..a4e90e6 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java
@@ -166,8 +166,8 @@ public class TestRegionServerReportForDuty {
 }
 
 @Override
-protected synchronized ServerName createRegionServerStatusStub() {
-  sn = super.createRegionServerStatusStub();
+protected synchronized ServerName createRegionServerStatusStub(boolean 
refresh) {
+  sn = super.createRegionServerStatusStub(refresh);
   rpcStubCreatedFlag = true;
 
   // Wait for master switch over. Only do this for the second region 
server.



hbase git commit: HBASE-16664 Timeout logic in AsyncProcess is broken

2016-10-13 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 e2278f954 -> 8f9fadf02


HBASE-16664 Timeout logic in AsyncProcess is broken

Signed-off-by: chenheng <chenh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8f9fadf0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8f9fadf0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8f9fadf0

Branch: refs/heads/branch-1
Commit: 8f9fadf0216977996564ec56347a91e5a0a8b945
Parents: e2278f9
Author: Phil Yang <ud1...@gmail.com>
Authored: Sun Oct 9 19:31:45 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Oct 13 17:14:52 2016 +0800

--
 .../hadoop/hbase/client/AsyncProcess.java   |  73 +---
 .../hbase/client/BufferedMutatorImpl.java   |  15 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |  29 ++-
 .../hbase/client/MultiServerCallable.java   |  15 +-
 .../hbase/client/RetryingTimeTracker.java   |   3 +-
 .../hadoop/hbase/client/TestAsyncProcess.java   |  13 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java | 182 ---
 7 files changed, 259 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8f9fadf0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 647a466..b0652a7 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -259,7 +259,8 @@ class AsyncProcess {
   protected final long pause;
   protected int numTries;
   protected int serverTrackerTimeout;
-  protected int timeout;
+  protected int rpcTimeout;
+  protected int operationTimeout;
   protected long primaryCallTimeoutMicroseconds;
   /** Whether to log details for batch errors */
   private final boolean logBatchErrorDetails;
@@ -322,7 +323,9 @@ class AsyncProcess {
 HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
 this.numTries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-this.timeout = rpcTimeout;
+this.rpcTimeout = rpcTimeout;
+this.operationTimeout = 
conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
 this.primaryCallTimeoutMicroseconds = 
conf.getInt(PRIMARY_CALL_TIMEOUT_KEY, 10000);
 
 this.maxTotalConcurrentTasks = 
conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
@@ -378,6 +381,14 @@ class AsyncProcess {
   DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS);
   }
 
+  public void setRpcTimeout(int rpcTimeout) {
+this.rpcTimeout = rpcTimeout;
+  }
+
+  public void setOperationTimeout(int operationTimeout) {
+this.operationTimeout = operationTimeout;
+  }
+
   /**
* @return pool if non null, otherwise returns this.pool if non null, 
otherwise throws
* RuntimeException
@@ -570,12 +581,12 @@ class AsyncProcess {
*/
   public  AsyncRequestFuture submitAll(TableName tableName,
   List rows, Batch.Callback callback, Object[] 
results) {
-return submitAll(null, tableName, rows, callback, results, null, timeout);
+return submitAll(null, tableName, rows, callback, results, null, 
operationTimeout, rpcTimeout);
   }
 
   public  AsyncRequestFuture submitAll(ExecutorService pool, 
TableName tableName,
   List rows, Batch.Callback callback, Object[] 
results) {
-return submitAll(pool, tableName, rows, callback, results, null, timeout);
+return submitAll(pool, tableName, rows, callback, results, null, 
operationTimeout, rpcTimeout);
   }
   /**
* Submit immediately the list of rows, whatever the server status. Kept for 
backward
@@ -589,7 +600,7 @@ class AsyncProcess {
*/
   public  AsyncRequestFuture submitAll(ExecutorService pool, 
TableName tableName,
   List rows, Batch.Callback callback, Object[] 
results,
-  PayloadCarryingServerCallable callable, int curTimeout) {
+  PayloadCarryingServerCallable callable, int operationTimeout, int 
rpcTimeout) {
 List<Action> actions = new ArrayList<Action>(rows.size());
 
 // The position will be used by the processBatch to match the object array 
returned.
@@ -609,7 +620,7 @@ class AsyncProcess {
 }
 AsyncRequestFutureImpl ars = createAsyncRequestFuture(
 tableName, actions, ng.getNonceGroup(), getPool(pool), callback, 
results, results != null,
-callable, curTimeout);
+callable, operationTimeout, rpcTimeout);
 ars.groupAndSendMultiAction(actions, 1);

hbase git commit: HBASE-16664 Timeout logic in AsyncProcess is broken

2016-10-13 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master f11aa4542 -> 88ff71b91


HBASE-16664 Timeout logic in AsyncProcess is broken

Signed-off-by: chenheng <chenh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/88ff71b9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/88ff71b9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/88ff71b9

Branch: refs/heads/master
Commit: 88ff71b91b086984fdc5b8707d134a1d475e5103
Parents: f11aa45
Author: Phil Yang <ud1...@gmail.com>
Authored: Sun Oct 9 15:25:11 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Oct 13 16:15:43 2016 +0800

--
 .../hadoop/hbase/client/AsyncProcess.java   |  38 +++--
 .../hbase/client/AsyncRequestFutureImpl.java|  64 +++
 .../hadoop/hbase/client/BufferedMutator.java|  10 ++
 .../hbase/client/BufferedMutatorImpl.java   |  20 ++-
 .../client/CancellableRegionServerCallable.java |  22 ++-
 .../hbase/client/ConnectionImplementation.java  |   8 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |  48 +++---
 .../hadoop/hbase/client/HTableMultiplexer.java  |   6 +-
 .../hbase/client/MultiServerCallable.java   |  15 +-
 .../client/NoncedRegionServerCallable.java  |   2 +-
 .../hbase/client/RetryingTimeTracker.java   |   3 +-
 .../RpcRetryingCallerWithReadReplicas.java  |  14 +-
 .../hadoop/hbase/client/TestAsyncProcess.java   |  39 +++--
 .../hbase/client/HConnectionTestingUtility.java |   4 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java | 167 +--
 15 files changed, 338 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/88ff71b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index f2d9546..abefc46 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -19,6 +19,8 @@
 
 package org.apache.hadoop.hbase.client;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
@@ -55,8 +57,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * This class  allows a continuous flow of requests. It's written to be 
compatible with a
  * synchronous caller such as HTable.
@@ -212,7 +212,8 @@ class AsyncProcess {
   protected final long pause;
   protected int numTries;
   protected int serverTrackerTimeout;
-  protected int timeout;
+  protected int rpcTimeout;
+  protected int operationTimeout;
   protected long primaryCallTimeoutMicroseconds;
   /** Whether to log details for batch errors */
   protected final boolean logBatchErrorDetails;
@@ -220,7 +221,7 @@ class AsyncProcess {
 
   public AsyncProcess(ClusterConnection hc, Configuration conf, 
ExecutorService pool,
   RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors,
-  RpcControllerFactory rpcFactory, int rpcTimeout) {
+  RpcControllerFactory rpcFactory, int rpcTimeout, int operationTimeout) {
 if (hc == null) {
   throw new IllegalArgumentException("ClusterConnection cannot be null.");
 }
@@ -236,7 +237,8 @@ class AsyncProcess {
 // how many times we could try in total, one more than retry number
 this.numTries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
 HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) + 1;
-this.timeout = rpcTimeout;
+this.rpcTimeout = rpcTimeout;
+this.operationTimeout = operationTimeout;
 this.primaryCallTimeoutMicroseconds = 
conf.getInt(PRIMARY_CALL_TIMEOUT_KEY, 10000);
 
 this.maxTotalConcurrentTasks = 
conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
@@ -434,7 +436,7 @@ class AsyncProcess {
   List locationErrorRows, Map<ServerName, MultiAction> 
actionsByServer,
   ExecutorService pool) {
 AsyncRequestFutureImpl ars = createAsyncRequestFuture(
-  tableName, retainedActions, nonceGroup, pool, callback, results, 
needResults, null, timeout);
+  tableName, retainedActions, nonceGroup, pool, callback, results, 
needResults, null, -1);
 // Add location errors if any
 if (locationErrors != null) {
   for (int i = 0; i < locationErrors.size(); ++i) {
@@ -448,6 +450,14 @@ class AsyncProcess {
 return ars;
   }
 
+  

hbase git commit: HBASE-16665 Check whether KeyValueUtil.createXXX could be replaced by CellUtil without copy

2016-09-25 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master f7bb6fbf2 -> 21969f515


HBASE-16665 Check whether KeyValueUtil.createXXX could be replaced by CellUtil 
without copy


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/21969f51
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/21969f51
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/21969f51

Branch: refs/heads/master
Commit: 21969f5159e6e8f93a7b8f9c7cfe2f359f11dd27
Parents: f7bb6fb
Author: chenheng <chenh...@apache.org>
Authored: Sun Sep 25 14:06:55 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sun Sep 25 14:06:55 2016 +0800

--
 .../org/apache/hadoop/hbase/client/Result.java  | 16 --
 .../java/org/apache/hadoop/hbase/CellUtil.java  | 22 
 .../hbase/io/hfile/HFilePrettyPrinter.java  |  2 +-
 .../hbase/mob/mapreduce/MemStoreWrapper.java|  3 ++-
 .../hbase/mob/mapreduce/SweepReducer.java   |  3 ++-
 .../hbase/regionserver/AbstractMemStore.java|  2 +-
 .../hbase/regionserver/HRegionFileSystem.java   |  5 +++--
 .../hbase/regionserver/StoreFileReader.java |  4 +---
 8 files changed, 46 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/21969f51/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index f1e7cc4..98792e7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -275,12 +275,24 @@ public class Result implements CellScannable, CellScanner 
{
 return result;
   }
 
+  private byte[] notNullBytes(final byte[] bytes) {
+if (bytes == null) {
+  return HConstants.EMPTY_BYTE_ARRAY;
+} else {
+  return bytes;
+}
+  }
+
   protected int binarySearch(final Cell [] kvs,
  final byte [] family,
  final byte [] qualifier) {
+byte[] familyNotNull = notNullBytes(family);
+byte[] qualifierNotNull = notNullBytes(qualifier);
 Cell searchTerm =
-KeyValueUtil.createFirstOnRow(CellUtil.cloneRow(kvs[0]),
-family, qualifier);
+CellUtil.createFirstOnRow(kvs[0].getRowArray(),
+kvs[0].getRowOffset(), kvs[0].getRowLength(),
+familyNotNull, 0, (byte)familyNotNull.length,
+qualifierNotNull, 0, qualifierNotNull.length);
 
 // pos === ( -(insertion point) - 1)
 int pos = Arrays.binarySearch(kvs, searchTerm, CellComparator.COMPARATOR);

http://git-wip-us.apache.org/repos/asf/hbase/blob/21969f51/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 94c7189..2da71fb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -1735,6 +1735,24 @@ public final class CellUtil {
 return new FirstOnRowCell(row, roffset, rlength);
   }
 
+  public static Cell createFirstOnRow(final byte[] row, final byte[] family, 
final byte[] col) {
+return createFirstOnRow(row, 0, (short)row.length,
+family, 0, (byte)family.length,
+col, 0, col.length);
+  }
+
+  public static Cell createFirstOnRow(final byte[] row, int roffset, short 
rlength,
+  final byte[] family, int foffset, byte 
flength,
+  final byte[] col, int coffset, int 
clength) {
+return new FirstOnRowColCell(row, roffset, rlength,
+family, foffset, flength,
+col, coffset, clength);
+  }
+
+  public static Cell createFirstOnRow(final byte[] row) {
+return createFirstOnRow(row, 0, (short)row.length);
+  }
+
   /**
* Create a Cell that is smaller than all other possible Cells for the given 
Cell's row.
* The family length is considered to be 0
@@ -1824,6 +1842,10 @@ public final class CellUtil {
 return new LastOnRowCell(cell.getRowArray(), cell.getRowOffset(), 
cell.getRowLength());
   }
 
+  public static Cell createLastOnRow(final byte[] row) {
+return new LastOnRowCell(row, 0, (short)row.length);
+  }
+
   /**
* Create a Cell that is larger than all other possible Cells for the given 
Cell's rk:cf:q. Used
* in creating "fake keys" for the multi-column Bloom filter optimization to 
skip the row/column

http://git-wi

hbase git commit: HBASE-16677 Add table size (total store file size) to table page (Guang Yang)

2016-09-24 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 92b1b5ac8 -> d6f9eab4b


HBASE-16677 Add table size (total store file size) to table page (Guang Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d6f9eab4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d6f9eab4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d6f9eab4

Branch: refs/heads/branch-1
Commit: d6f9eab4be16dee0dd45ea9f1e0dd17ef8a74b7b
Parents: 92b1b5a
Author: chenheng <chenh...@apache.org>
Authored: Sun Sep 25 10:48:01 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sun Sep 25 10:50:03 2016 +0800

--
 .../resources/hbase-webapps/master/table.jsp| 50 +++-
 1 file changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d6f9eab4/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 2c749ff..37209c7 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -72,6 +72,25 @@
   }
   String action = request.getParameter("action");
   String key = request.getParameter("key");
+  long totalStoreFileSizeMB = 0;
+
+  final String numRegionsParam = request.getParameter("numRegions");
+  // By default, the page render up to 10000 regions to improve the page load time
+  int numRegionsToRender = 10000;
+  if (numRegionsParam != null) {
+// either 'all' or a number
+if (numRegionsParam.equals("all")) {
+  numRegionsToRender = -1;
+} else {
+  try {
+numRegionsToRender = Integer.parseInt(numRegionsParam);
+  } catch (NumberFormatException ex) {
+// ignore
+  }
+}
+  }
+  int numRegions = 0;
+
 %>
 

hbase git commit: HBASE-16677 Add table size (total store file size) to table page (Guang Yang)

2016-09-24 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master b14fb1488 -> f7bb6fbf2


HBASE-16677 Add table size (total store file size) to table page (Guang Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f7bb6fbf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f7bb6fbf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f7bb6fbf

Branch: refs/heads/master
Commit: f7bb6fbf21a6a86700b8411311343f0be80ebf3f
Parents: b14fb14
Author: chenheng <chenh...@apache.org>
Authored: Sun Sep 25 10:48:01 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sun Sep 25 10:48:01 2016 +0800

--
 .../resources/hbase-webapps/master/table.jsp| 50 +++-
 1 file changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f7bb6fbf/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 27388e7..86b70c7 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -73,6 +73,25 @@
   }
   String action = request.getParameter("action");
   String key = request.getParameter("key");
+  long totalStoreFileSizeMB = 0;
+
+  final String numRegionsParam = request.getParameter("numRegions");
+  // By default, the page render up to 10000 regions to improve the page load time
+  int numRegionsToRender = 10000;
+  if (numRegionsParam != null) {
+// either 'all' or a number
+if (numRegionsParam.equals("all")) {
+  numRegionsToRender = -1;
+} else {
+  try {
+numRegionsToRender = Integer.parseInt(numRegionsParam);
+  } catch (NumberFormatException ex) {
+// ignore
+  }
+}
+  }
+  int numRegions = 0;
+
 %>
 

hbase git commit: Revert "HBASE-16677 Add table size (total store file size) to table page"

2016-09-24 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master f5351e2db -> b14fb1488


Revert "HBASE-16677 Add table size (total store file size) to table page"

This reverts commit f5351e2dbd29ab32dbd4044844feb6a94d9fea98.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b14fb148
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b14fb148
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b14fb148

Branch: refs/heads/master
Commit: b14fb14886686d3135f718ff7e067230ff7d62fc
Parents: f5351e2
Author: chenheng <chenh...@apache.org>
Authored: Sun Sep 25 10:47:20 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sun Sep 25 10:47:20 2016 +0800

--
 .../resources/hbase-webapps/master/table.jsp| 50 +---
 1 file changed, 1 insertion(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b14fb148/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 86b70c7..27388e7 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -73,25 +73,6 @@
   }
   String action = request.getParameter("action");
   String key = request.getParameter("key");
-  long totalStoreFileSizeMB = 0;
-
-  final String numRegionsParam = request.getParameter("numRegions");
-  // By default, the page render up to 10000 regions to improve the page load time
-  int numRegionsToRender = 10000;
-  if (numRegionsParam != null) {
-// either 'all' or a number
-if (numRegionsParam.equals("all")) {
-  numRegionsToRender = -1;
-} else {
-  try {
-numRegionsToRender = Integer.parseInt(numRegionsParam);
-  } catch (NumberFormatException ex) {
-// ignore
-  }
-}
-  }
-  int numRegions = 0;
-
 %>
 

hbase git commit: HBASE-16677 Add table size (total store file size) to table page

2016-09-24 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 2765b9d9d -> f5351e2db


HBASE-16677 Add table size (total store file size) to table page


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f5351e2d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f5351e2d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f5351e2d

Branch: refs/heads/master
Commit: f5351e2dbd29ab32dbd4044844feb6a94d9fea98
Parents: 2765b9d
Author: chenheng <chenh...@apache.org>
Authored: Sun Sep 25 09:49:50 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sun Sep 25 09:49:50 2016 +0800

--
 .../resources/hbase-webapps/master/table.jsp| 50 +++-
 1 file changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f5351e2d/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 27388e7..86b70c7 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -73,6 +73,25 @@
   }
   String action = request.getParameter("action");
   String key = request.getParameter("key");
+  long totalStoreFileSizeMB = 0;
+
+  final String numRegionsParam = request.getParameter("numRegions");
+  // By default, the page render up to 10000 regions to improve the page load time
+  int numRegionsToRender = 10000;
+  if (numRegionsParam != null) {
+// either 'all' or a number
+if (numRegionsParam.equals("all")) {
+  numRegionsToRender = -1;
+} else {
+  try {
+numRegionsToRender = Integer.parseInt(numRegionsParam);
+  } catch (NumberFormatException ex) {
+// ignore
+  }
+}
+  }
+  int numRegions = 0;
+
 %>
 

hbase git commit: HBASE-16659 Use CellUtil.createFirstOnRow instead of KeyValueUtil.createFirstOnRow in some places (binlijin)

2016-09-21 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 6624c676f -> c67983ebf


HBASE-16659 Use CellUtil.createFirstOnRow instead of 
KeyValueUtil.createFirstOnRow in some places (binlijin)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c67983eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c67983eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c67983eb

Branch: refs/heads/master
Commit: c67983ebf88d449a67bccd8b213237362a4093f6
Parents: 6624c67
Author: chenheng <chenh...@apache.org>
Authored: Wed Sep 21 15:25:36 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 21 15:25:36 2016 +0800

--
 .../java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java   | 4 ++--
 .../org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java   | 4 ++--
 .../java/org/apache/hadoop/hbase/regionserver/HRegion.java| 2 +-
 .../hadoop/hbase/regionserver/ReversedRegionScannerImpl.java  | 7 ++-
 .../apache/hadoop/hbase/regionserver/StoreFileScanner.java| 3 ++-
 5 files changed, 9 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c67983eb/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 54402ef..f52137d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -25,7 +25,7 @@ import java.util.PriorityQueue;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -175,7 +175,7 @@ public class FuzzyRowFilter extends FilterBase {
   return null;
 }
 byte[] nextRowKey = tracker.nextRow();
-return KeyValueUtil.createFirstOnRow(nextRowKey);
+return CellUtil.createFirstOnRow(nextRowKey, 0, (short) nextRowKey.length);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/c67983eb/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
index 3f26586..33f035c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
@@ -25,7 +25,6 @@ import java.util.List;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -138,7 +137,8 @@ public class MultiRowRangeFilter extends FilterBase {
   @Override
   public Cell getNextCellHint(Cell currentKV) {
 // skip to the next range's start row
-return KeyValueUtil.createFirstOnRow(range.startRow);
+return CellUtil.createFirstOnRow(range.startRow, 0,
+(short) range.startRow.length);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/c67983eb/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 351a389..debaec9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -6216,7 +6216,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   }
   boolean result = false;
   startRegionOperation();
-  KeyValue kv = KeyValueUtil.createFirstOnRow(row);
+  Cell kv = CellUtil.createFirstOnRow(row, 0, (short) row.length);
   try {
 // use request seek to make use of the lazy seek option. See HBASE-5520
 result = this.storeHeap.requestSeek(kv, true, true);

[1/2] hbase git commit: HBASE-16631 Extract AsyncRequestFuture related code from AsyncProcess

2016-09-16 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master b6b72361b -> 2cf8907db


http://git-wip-us.apache.org/repos/asf/hbase/blob/2cf8907d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
new file mode 100644
index 0000000..c6b2a53
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -0,0 +1,1290 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.RetryImmediatelyException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.htrace.Trace;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * The context, and return value, for a single submit/submitAll call.
+ * Note on how this class (one AP submit) works. Initially, all requests are 
split into groups
+ * by server; request is sent to each server in parallel; the RPC calls are 
not async so a
+ * thread per server is used. Every time some actions fail, regions/locations 
might have
+ * changed, so we re-group them by server and region again and send these 
groups in parallel
+ * too. The result, in case of retries, is a "tree" of threads, with parent 
exiting after
+ * scheduling children. This is why lots of code doesn't require any 
synchronization.
+ */
+@InterfaceAudience.Private
+class AsyncRequestFutureImpl implements AsyncRequestFuture {
+
+  private static final Log LOG = 
LogFactory.getLog(AsyncRequestFutureImpl.class);
+
+  /**
+   * Runnable (that can be submitted to thread pool) that waits for when it's 
time
+   * to issue replica calls, finds region replicas, groups the requests by 
replica and
+   * issues the calls (on separate threads, via sendMultiAction).
+   * This is done on a separate thread because we don't want to wait on user 
thread for
+   * our asynchronous call, and usually we have to wait before making replica 
calls.
+   */
+  private final class ReplicaCallIssuingRunnable implements Runnable {
+private final long startTime;
+private final List initialActions;
+
+public ReplicaCallIssuingRunnable(List initialActions, long 
startTime) {
+  this.initialActions = initialActions;
+  this.startTime = startTime;
+}
+
+@Override
+public void run() {
+  boolean done = false;
+  if (asyncProcess.primaryCallTimeoutMicroseconds > 0) {
+try {
+  done = waitUntilDone(startTime * 1000L + 
asyncProcess.primaryCallTimeoutMicroseconds);
+} catch (InterruptedException ex) {
+  LOG.error("Replica thread was 

[2/2] hbase git commit: HBASE-16631 Extract AsyncRequestFuture related code from AsyncProcess

2016-09-16 Thread chenheng
HBASE-16631 Extract AsyncRequestFuture related code from AsyncProcess


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2cf8907d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2cf8907d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2cf8907d

Branch: refs/heads/master
Commit: 2cf8907db53b84a0118acc1edd1dfb9b37abe8b7
Parents: b6b7236
Author: chenheng <chenh...@apache.org>
Authored: Sat Sep 17 00:35:23 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sat Sep 17 00:35:23 2016 +0800

--
 .../hadoop/hbase/client/AsyncProcess.java   | 1375 +-
 .../hadoop/hbase/client/AsyncRequestFuture.java |   40 +
 .../hbase/client/AsyncRequestFutureImpl.java| 1290 
 .../apache/hadoop/hbase/client/BatchErrors.java |   69 +
 .../org/apache/hadoop/hbase/client/HTable.java  |1 -
 .../hadoop/hbase/client/HTableMultiplexer.java  |1 -
 .../hadoop/hbase/client/TestAsyncProcess.java   |   48 +-
 .../hadoop/hbase/client/TestReplicasClient.java |2 -
 8 files changed, 1481 insertions(+), 1345 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2cf8907d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 93b17bc..2ffb2e3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -20,52 +20,41 @@
 package org.apache.hadoop.hbase.client;
 
 import com.google.common.annotations.VisibleForTesting;
-import java.io.InterruptedIOException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.AsyncProcess.RowChecker.ReturnCode;
-import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.RetryImmediatelyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.AsyncProcess.RowChecker.ReturnCode;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Trace;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * This class  allows a continuous flow of requests. It's written to be 
compatible with a
@@ -

hbase git commit: HBASE-16381 Shell deleteall command should support row key prefixes (Yi Liang)

2016-09-15 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 8ef6c7634 -> 1d6c90b49


HBASE-16381 Shell deleteall command should support row key prefixes (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1d6c90b4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1d6c90b4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1d6c90b4

Branch: refs/heads/master
Commit: 1d6c90b4969b8ec47699c69984be052050a9ee46
Parents: 8ef6c76
Author: chenheng <chenh...@apache.org>
Authored: Thu Sep 15 19:18:47 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Sep 15 19:20:29 2016 +0800

--
 hbase-shell/src/main/ruby/hbase/table.rb| 81 +++-
 .../src/main/ruby/shell/commands/deleteall.rb   | 17 +++-
 hbase-shell/src/test/ruby/hbase/table_test.rb   | 12 +++
 3 files changed, 86 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1d6c90b4/hbase-shell/src/main/ruby/hbase/table.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index 5930c0d..22bbcfe 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -160,6 +160,62 @@ EOF
 end
 
 
#--
+# Create a Delete mutation
+def _createdelete_internal(row, column = nil,
+timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args 
= {})
+  temptimestamp = timestamp
+  if temptimestamp.kind_of?(Hash)
+timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP
+  end
+  d = org.apache.hadoop.hbase.client.Delete.new(row.to_s.to_java_bytes, 
timestamp)
+  if temptimestamp.kind_of?(Hash)
+temptimestamp.each do |k, v|
+  if v.kind_of?(String)
+set_cell_visibility(d, v) if v
+  end
+end
+  end
+  if args.any?
+ visibility = args[VISIBILITY]
+ set_cell_visibility(d, visibility) if visibility
+  end
+  if column
+family, qualifier = parse_column_name(column)
+d.addColumns(family, qualifier, timestamp)
+  end
+  return d
+end
+
+
#--
+# Delete rows using prefix
+def _deleterows_internal(row, column = nil,
+timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, 
args={})
+  cache = row["CACHE"] ? row["CACHE"] : 100
+  prefix = row["ROWPREFIXFILTER"]
+
+  # create scan to get table names using prefix
+  scan = org.apache.hadoop.hbase.client.Scan.new
+  scan.setRowPrefixFilter(prefix.to_java_bytes)
+  # Run the scanner to get all rowkeys
+  scanner = @table.getScanner(scan)
+  # Create a list to store all deletes
+  list = java.util.ArrayList.new
+  # Iterate results
+  iter = scanner.iterator
+  while iter.hasNext
+row = iter.next
+key = org.apache.hadoop.hbase.util.Bytes::toStringBinary(row.getRow)
+d = _createdelete_internal(key, column, timestamp, args)
+list.add(d)
+if list.size >= cache
+  @table.delete(list)
+  list.clear
+end
+  end
+  @table.delete(list)
+end
+
+
#--
 # Delete a cell
 def _delete_internal(row, column,
timestamp = 
org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
@@ -175,27 +231,12 @@ EOF
   if is_meta_table?
 raise ArgumentError, "Row Not Found" if _get_internal(row).nil?
   end
-  temptimestamp = timestamp
-  if temptimestamp.kind_of?(Hash)
- timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP
-  end
-  d = org.apache.hadoop.hbase.client.Delete.new(row.to_s.to_java_bytes, 
timestamp)
-  if temptimestamp.kind_of?(Hash)
-   temptimestamp.each do |k, v|
- if v.kind_of?(String)
-   set_cell_visibility(d, v) if v
- end
-end
-  end
-  if args.any?
- visibility = args[VISIBILITY]
- set_cell_visibility(d, visibility) if visibility
-  end
-  if column
-family, qualifier = parse_column_name(column)
-d.addColumns(family, qualifier, timestamp)
+  if row.kind_of?(Hash)
+_deleterows_internal(row, column, timestamp, args)
+  else
+d = _createdelete_internal(row, column, timestamp, args)
+@t

hbase git commit: HBASE-16611 Flakey org.apache.hadoop.hbase.client.TestReplicasClient.testCancelOfMultiGet

2016-09-13 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 422734e73 -> cd9f42237


HBASE-16611 Flakey 
org.apache.hadoop.hbase.client.TestReplicasClient.testCancelOfMultiGet


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cd9f4223
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cd9f4223
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cd9f4223

Branch: refs/heads/master
Commit: cd9f42237344756a7db395bd8241f41b00e359a2
Parents: 422734e
Author: chenheng <chenh...@apache.org>
Authored: Tue Sep 13 14:52:50 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Tue Sep 13 14:52:50 2016 +0800

--
 .../main/java/org/apache/hadoop/hbase/client/AsyncProcess.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd9f4223/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 1531201..93b17bc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -756,7 +756,7 @@ class AsyncProcess {
 
   @Override
   public void run() {
-AbstractResponse res;
+AbstractResponse res = null;
 CancellableRegionServerCallable callable = currentCallable;
 try {
   // setup the callable based on the actions, if we don't have one 
already from the request
@@ -802,7 +802,7 @@ class AsyncProcess {
   throw new RuntimeException(t);
 } finally {
   decTaskCounters(multiAction.getRegions(), server);
-  if (callsInProgress != null && callable != null) {
+  if (callsInProgress != null && callable != null && res != null) {
 callsInProgress.remove(callable);
   }
 }



hbase git commit: HBASE-16607 Make NoncedRegionServerCallable extend CancellableRegionServerCallable

2016-09-11 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 2c3b0f2c0 -> c19d2cabb


HBASE-16607 Make NoncedRegionServerCallable extend 
CancellableRegionServerCallable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c19d2cab
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c19d2cab
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c19d2cab

Branch: refs/heads/master
Commit: c19d2cabbd4c6e312e4926f72d348a5e554cd3dd
Parents: 2c3b0f2
Author: chenheng <chenh...@apache.org>
Authored: Mon Sep 12 11:03:29 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Mon Sep 12 11:03:29 2016 +0800

--
 .../org/apache/hadoop/hbase/client/HTable.java  | 52 +++---
 .../client/NoncedRegionServerCallable.java  | 74 ++--
 2 files changed, 31 insertions(+), 95 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c19d2cab/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index e98424c..0d1b156 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -630,17 +630,17 @@ public class HTable implements Table {
   public Result append(final Append append) throws IOException {
 checkHasFamilies(append);
 NoncedRegionServerCallable callable =
-new NoncedRegionServerCallable(this.connection,
-this.rpcControllerFactory, getName(), append.getRow()) {
-  @Override
-  protected Result call(HBaseRpcController controller) throws Exception {
-MutateRequest request = RequestConverter.buildMutateRequest(
-  getLocation().getRegionInfo().getRegionName(), append, 
getNonceGroup(), getNonce());
-MutateResponse response = getStub().mutate(controller, request);
-if (!response.hasResult()) return null;
-return ProtobufUtil.toResult(response.getResult(), 
controller.cellScanner());
-  }
-};
+new NoncedRegionServerCallable(this.connection, 
this.rpcControllerFactory,
+getName(), append.getRow()) {
+  @Override
+  protected Result rpcCall() throws Exception {
+MutateRequest request = RequestConverter.buildMutateRequest(
+  getLocation().getRegionInfo().getRegionName(), append, 
getNonceGroup(), getNonce());
+MutateResponse response = getStub().mutate(getRpcController(), 
request);
+if (!response.hasResult()) return null;
+return ProtobufUtil.toResult(response.getResult(), 
getRpcControllerCellScanner());
+  }
+};
 return rpcCallerFactory. newCaller(this.writeRpcTimeout).
 callWithRetries(callable, this.operationTimeout);
   }
@@ -652,16 +652,16 @@ public class HTable implements Table {
   public Result increment(final Increment increment) throws IOException {
 checkHasFamilies(increment);
 NoncedRegionServerCallable callable =
-new NoncedRegionServerCallable(this.connection,
-this.rpcControllerFactory, getName(), increment.getRow()) {
-  @Override
-  protected Result call(HBaseRpcController controller) throws Exception {
-MutateRequest request = RequestConverter.buildMutateRequest(
-  getLocation().getRegionInfo().getRegionName(), increment, 
getNonceGroup(), getNonce());
-MutateResponse response = getStub().mutate(controller, request);
-// Should this check for null like append does?
-return ProtobufUtil.toResult(response.getResult(), 
controller.cellScanner());
-  }
+  new NoncedRegionServerCallable(this.connection,
+  this.rpcControllerFactory, getName(), increment.getRow()) {
+@Override
+protected Result rpcCall() throws Exception {
+  MutateRequest request = RequestConverter.buildMutateRequest(
+getLocation().getRegionInfo().getRegionName(), increment, 
getNonceGroup(), getNonce());
+  MutateResponse response = getStub().mutate(getRpcController(), 
request);
+  // Should this check for null like append does?
+  return ProtobufUtil.toResult(response.getResult(), 
getRpcControllerCellScanner());
+}
 };
 return rpcCallerFactory. 
newCaller(writeRpcTimeout).callWithRetries(callable,
 this.operationTimeout);
@@ -701,12 +701,12 @@ public class HTable implements Table {
 new NoncedRegionServerCallable(this.connection, 
this.rpcControllerFactory, getName(),
 row) {
   @Override
-  protected Long call(HBaseRpcController controller) t

hbase git commit: HBASE-16606 Remove some duplicate code in HTable

2016-09-11 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 7bda5151e -> 2c3b0f2c0


HBASE-16606 Remove some duplicate code in HTable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2c3b0f2c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2c3b0f2c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2c3b0f2c

Branch: refs/heads/master
Commit: 2c3b0f2c0b2d47dfd3a22e1f47f7eb1317d3514f
Parents: 7bda515
Author: chenheng <chenh...@apache.org>
Authored: Mon Sep 12 10:57:21 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Mon Sep 12 10:57:21 2016 +0800

--
 .../org/apache/hadoop/hbase/client/HTable.java  | 44 +---
 1 file changed, 10 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2c3b0f2c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 492714f..e98424c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -479,16 +479,18 @@ public class HTable implements Table {
   @Override
   public void batch(final List actions, final Object[] results)
   throws InterruptedException, IOException {
-AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, actions, null, 
results);
-ars.waitUntilDone();
-if (ars.hasError()) {
-  throw ars.getErrors();
-}
+batch(actions, results, -1);
   }
 
   public void batch(final List actions, final Object[] results, 
int timeout)
   throws InterruptedException, IOException {
-AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, actions, null, 
results, null, timeout);
+AsyncRequestFuture ars = null;
+if (timeout != -1) {
+  ars = multiAp.submitAll(pool, tableName, actions, null, results, null, 
timeout);
+} else {
+  // use default timeout in AP
+  ars = multiAp.submitAll(pool, tableName, actions, null, results);
+}
 ars.waitUntilDone();
 if (ars.hasError()) {
   throw ars.getErrors();
@@ -720,20 +722,7 @@ public class HTable implements Table {
   final byte [] family, final byte [] qualifier, final byte [] value,
   final Put put)
   throws IOException {
-RegionServerCallable callable =
-new RegionServerCallable(this.connection, 
this.rpcControllerFactory,
-getName(), row) {
-  @Override
-  protected Boolean rpcCall() throws Exception {
-MutateRequest request = RequestConverter.buildMutateRequest(
-  getLocation().getRegionInfo().getRegionName(), row, family, 
qualifier,
-  new BinaryComparator(value), CompareType.EQUAL, put);
-MutateResponse response = getStub().mutate(getRpcController(), 
request);
-return Boolean.valueOf(response.getProcessed());
-  }
-};
-return rpcCallerFactory. newCaller(this.writeRpcTimeout).
-callWithRetries(callable, this.operationTimeout);
+return checkAndPut(row, family, qualifier, CompareOp.EQUAL, value, put);
   }
 
   /**
@@ -768,20 +757,7 @@ public class HTable implements Table {
   public boolean checkAndDelete(final byte [] row, final byte [] family, final 
byte [] qualifier,
   final byte [] value, final Delete delete)
   throws IOException {
-RegionServerCallable callable =
-new RegionServerCallable(this.connection, 
this.rpcControllerFactory,
-getName(), row) {
-  @Override
-  protected Boolean rpcCall() throws Exception {
-MutateRequest request = RequestConverter.buildMutateRequest(
-  getLocation().getRegionInfo().getRegionName(), row, family, 
qualifier,
-  new BinaryComparator(value), CompareType.EQUAL, delete);
-MutateResponse response = getStub().mutate(getRpcController(), 
request);
-return Boolean.valueOf(response.getProcessed());
-  }
-};
-return rpcCallerFactory. newCaller(this.writeRpcTimeout).
-callWithRetries(callable, this.operationTimeout);
+return checkAndDelete(row, family, qualifier, CompareOp.EQUAL, value, 
delete);
   }
 
   /**



hbase git commit: HBASE-16086 TableCfWALEntryFilter and ScopeWALEntryFilter should not redundantly iterate over cells (Vincent Poon)

2016-09-10 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 a5f0223bd -> d40140784


HBASE-16086 TableCfWALEntryFilter and ScopeWALEntryFilter should not 
redundantly iterate over cells (Vincent Poon)

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java

hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java

hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4014078
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4014078
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4014078

Branch: refs/heads/branch-1.3
Commit: d4014078451325c2e1ba18a7f1775a43cde49305
Parents: a5f0223
Author: chenheng <chenh...@apache.org>
Authored: Sun Sep 11 09:55:08 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sun Sep 11 10:50:54 2016 +0800

--
 .../hbase/replication/BulkLoadCellFilter.java   |  81 
 .../hbase/replication/ChainWALEntryFilter.java  |  38 +-
 .../hbase/replication/ScopeWALEntryFilter.java  |  98 ---
 .../replication/TableCfWALEntryFilter.java  | 125 +++
 .../hadoop/hbase/replication/WALCellFilter.java |  41 ++
 .../TestReplicationWALEntryFilters.java |  14 +--
 6 files changed, 234 insertions(+), 163 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4014078/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
new file mode 100644
index 000..3599d10
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+
+import com.google.common.base.Predicate;
+
+public class BulkLoadCellFilter {
+  private static final Log LOG = LogFactory.getLog(BulkLoadCellFilter.class);
+
+  /**
+   * Filters the bulk load cell using the supplied predicate.
+   * @param cell The WAL cell to filter.
+   * @param famPredicate Returns true if the given family should be removed.
+   * @return The filtered cell.
+   */
+  public Cell filterCell(Cell cell, Predicate<byte[]> famPredicate) {
+byte[] fam;
+BulkLoadDescriptor bld = null;
+try {
+  bld = WALEdit.getBulkLoadDescriptor(cell);
+} catch (IOException e) {
+  LOG.warn("Failed to get bulk load events information from the WAL 
file.", e);
+  return cell;
+}
+List storesList = bld.getStoresList();
+// Copy the StoreDescriptor list and update it as storesList is an 
unmodifiableList
+List copiedStoresList = new 
ArrayList(storesList);
+Iterator copiedStoresListIterator = 
copiedStoresList.iterator();
+boolean anyStoreRemoved = false;
+while (copiedStoresListIterator.hasNext()) {
+  StoreDescriptor sd = copiedStoresListIterator.next();
+  fam = sd.getFamilyName().toByteArray();
+  if (famPredicate.apply(fam)) {
+copiedStoresListIterator.remove();
+anyStoreRemoved = true;
+  }
+}
+
+if (!anyStoreRemoved) {
+  return cell;
+} else i

hbase git commit: HBASE-16086 TableCfWALEntryFilter and ScopeWALEntryFilter should not redundantly iterate over cells (Vincent Poon)

2016-09-10 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 fe57fa4da -> 94026d0d0


HBASE-16086 TableCfWALEntryFilter and ScopeWALEntryFilter should not 
redundantly iterate over cells (Vincent Poon)

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ScopeWALEntryFilter.java

hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java

hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/94026d0d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/94026d0d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/94026d0d

Branch: refs/heads/branch-1
Commit: 94026d0d098b3bdcb5a6251bd1e135a976f796f5
Parents: fe57fa4
Author: chenheng <chenh...@apache.org>
Authored: Sun Sep 11 09:55:08 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sun Sep 11 10:48:00 2016 +0800

--
 .../hbase/replication/BulkLoadCellFilter.java   |  81 
 .../hbase/replication/ChainWALEntryFilter.java  |  38 +-
 .../hbase/replication/ScopeWALEntryFilter.java  |  98 ---
 .../replication/TableCfWALEntryFilter.java  | 125 +++
 .../hadoop/hbase/replication/WALCellFilter.java |  41 ++
 .../TestReplicationWALEntryFilters.java |  14 +--
 6 files changed, 234 insertions(+), 163 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/94026d0d/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
new file mode 100644
index 000..3599d10
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+
+import com.google.common.base.Predicate;
+
+public class BulkLoadCellFilter {
+  private static final Log LOG = LogFactory.getLog(BulkLoadCellFilter.class);
+
+  /**
+   * Filters the bulk load cell using the supplied predicate.
+   * @param cell The WAL cell to filter.
+   * @param famPredicate Returns true if the given family should be removed.
+   * @return The filtered cell.
+   */
+  public Cell filterCell(Cell cell, Predicate<byte[]> famPredicate) {
+byte[] fam;
+BulkLoadDescriptor bld = null;
+try {
+  bld = WALEdit.getBulkLoadDescriptor(cell);
+} catch (IOException e) {
+  LOG.warn("Failed to get bulk load events information from the WAL 
file.", e);
+  return cell;
+}
+List storesList = bld.getStoresList();
+// Copy the StoreDescriptor list and update it as storesList is an 
unmodifiableList
+List copiedStoresList = new 
ArrayList(storesList);
+Iterator copiedStoresListIterator = 
copiedStoresList.iterator();
+boolean anyStoreRemoved = false;
+while (copiedStoresListIterator.hasNext()) {
+  StoreDescriptor sd = copiedStoresListIterator.next();
+  fam = sd.getFamilyName().toByteArray();
+  if (famPredicate.apply(fam)) {
+copiedStoresListIterator.remove();
+anyStoreRemoved = true;
+  }
+}
+
+if (!anyStoreRemoved) {
+  return cell;
+} else i

hbase git commit: HBASE-16086 TableCfWALEntryFilter and ScopeWALEntryFilter should not redundantly iterate over cells (Vincent Poon)

2016-09-10 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master cc2a40a78 -> 80d8b2100


HBASE-16086 TableCfWALEntryFilter and ScopeWALEntryFilter should not 
redundantly iterate over cells (Vincent Poon)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80d8b210
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80d8b210
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80d8b210

Branch: refs/heads/master
Commit: 80d8b2100d9f4dc2a01ea6bdbded6ec52d7e4263
Parents: cc2a40a
Author: chenheng <chenh...@apache.org>
Authored: Sun Sep 11 09:55:08 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sun Sep 11 09:55:08 2016 +0800

--
 .../hbase/replication/BulkLoadCellFilter.java   |  81 
 .../hbase/replication/ChainWALEntryFilter.java  |  38 +-
 .../hbase/replication/ScopeWALEntryFilter.java  |  94 --
 .../replication/TableCfWALEntryFilter.java  | 124 +++
 .../hadoop/hbase/replication/WALCellFilter.java |  41 ++
 .../TestReplicationWALEntryFilters.java |  12 +-
 6 files changed, 231 insertions(+), 159 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/80d8b210/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
new file mode 100644
index 000..3599d10
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BulkLoadCellFilter.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+
+import com.google.common.base.Predicate;
+
+public class BulkLoadCellFilter {
+  private static final Log LOG = LogFactory.getLog(BulkLoadCellFilter.class);
+
+  /**
+   * Filters the bulk load cell using the supplied predicate.
+   * @param cell The WAL cell to filter.
+   * @param famPredicate Returns true if the given family should be removed.
+   * @return The filtered cell.
+   */
+  public Cell filterCell(Cell cell, Predicate<byte[]> famPredicate) {
+byte[] fam;
+BulkLoadDescriptor bld = null;
+try {
+  bld = WALEdit.getBulkLoadDescriptor(cell);
+} catch (IOException e) {
+  LOG.warn("Failed to get bulk load events information from the WAL 
file.", e);
+  return cell;
+}
+List storesList = bld.getStoresList();
+// Copy the StoreDescriptor list and update it as storesList is an 
unmodifiableList
+List copiedStoresList = new 
ArrayList(storesList);
+Iterator copiedStoresListIterator = 
copiedStoresList.iterator();
+boolean anyStoreRemoved = false;
+while (copiedStoresListIterator.hasNext()) {
+  StoreDescriptor sd = copiedStoresListIterator.next();
+  fam = sd.getFamilyName().toByteArray();
+  if (famPredicate.apply(fam)) {
+copiedStoresListIterator.remove();
+anyStoreRemoved = true;
+  }
+}
+
+if (!anyStoreRemoved) {
+  return cell;
+} else if (copiedStoresList.isEmpty()) {
+  return null;
+}
+BulkLoadDescriptor.Builder newDesc =
+BulkLoadDescriptor.newBuilder().setTableName(bld.getTableName())
+.setEncodedRegionName(bld.getEncodedRegionName())
+.setBulkloadSeqNum(bld.getBulkloadSeqNum());
+newDesc.addA

hbase git commit: HBASE-16596 Reduce redundant interfaces in AsyncProcess

2016-09-09 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master e1e063720 -> cc2a40a78


HBASE-16596 Reduce redundant interfaces in AsyncProcess


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cc2a40a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cc2a40a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cc2a40a7

Branch: refs/heads/master
Commit: cc2a40a78f4e65ef38dad2cbc921613c4d15cbf7
Parents: e1e0637
Author: chenheng <chenh...@apache.org>
Authored: Sat Sep 10 11:13:28 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sat Sep 10 11:13:28 2016 +0800

--
 .../hadoop/hbase/client/AsyncProcess.java   | 29 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |  2 +-
 .../hadoop/hbase/client/TestAsyncProcess.java   | 59 +---
 .../hadoop/hbase/client/TestClientPushback.java |  2 +-
 4 files changed, 31 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cc2a40a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 5bb0f58..c5745e9 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -392,15 +392,7 @@ class AsyncProcess {
 }
 throw new RuntimeException("Neither AsyncProcess nor request have 
ExecutorService");
   }
-  /**
-   * See {@link #submit(ExecutorService, TableName, List, boolean, 
Batch.Callback, boolean)}.
-   * Uses default ExecutorService for this AP (must have been created with 
one).
-   */
-  public  AsyncRequestFuture submit(TableName tableName, final List rows,
-  boolean atLeastOne, Batch.Callback callback, boolean 
needResults)
-  throws InterruptedIOException {
-return submit(null, tableName, rows, atLeastOne, callback, needResults);
-  }
+
   /**
* See {@link #submit(ExecutorService, TableName, RowAccess, boolean, 
Batch.Callback, boolean)}.
* Uses default ExecutorService for this AP (must have been created with 
one).
@@ -529,7 +521,7 @@ class AsyncProcess {
   List locationErrorRows, Map<ServerName, MultiAction> 
actionsByServer,
   ExecutorService pool) {
 AsyncRequestFutureImpl ars = createAsyncRequestFuture(
-  tableName, retainedActions, nonceGroup, pool, callback, results, 
needResults);
+  tableName, retainedActions, nonceGroup, pool, callback, results, 
needResults, null, timeout);
 // Add location errors if any
 if (locationErrors != null) {
   for (int i = 0; i < locationErrors.size(); ++i) {
@@ -564,14 +556,6 @@ class AsyncProcess {
 
 multiAction.add(regionName, action);
   }
-  /**
-   * See {@link #submitAll(ExecutorService, TableName, List, Batch.Callback, 
Object[])}.
-   * Uses default ExecutorService for this AP (must have been created with 
one).
-   */
-  public  AsyncRequestFuture submitAll(TableName tableName,
-  List rows, Batch.Callback callback, Object[] 
results) {
-return submitAll(null, tableName, rows, callback, results, null, timeout);
-  }
 
   public  AsyncRequestFuture submitAll(ExecutorService pool, 
TableName tableName,
   List rows, Batch.Callback callback, Object[] 
results) {
@@ -1785,15 +1769,6 @@ class AsyncProcess {
 results, callback, callable, curTimeout);
   }
 
-  @VisibleForTesting
-  /** Create AsyncRequestFuture. Isolated to be easily overridden in the 
tests. */
-  protected  AsyncRequestFutureImpl createAsyncRequestFuture(
-  TableName tableName, List<Action> actions, long nonceGroup, 
ExecutorService pool,
-  Batch.Callback callback, Object[] results, boolean needResults) 
{
-return createAsyncRequestFuture(
-tableName, actions, nonceGroup, pool, callback, results, needResults, 
null, timeout);
-  }
-
   /**
* Create a callable. Isolated to be easily overridden in the tests.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/cc2a40a7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index f8bbfc1..492714f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -1218,7 +1218,7 @@ public class HTable implements Table {
 RpcRetryin

hbase git commit: HBASE-16570 Compute region locality in parallel at startup (binlijin)

2016-09-09 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 493c31c29 -> a5f0223bd


HBASE-16570 Compute region locality in parallel at startup (binlijin)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a5f0223b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a5f0223b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a5f0223b

Branch: refs/heads/branch-1.3
Commit: a5f0223bd1db25c18c11ba1250f53066e50f28e8
Parents: 493c31c
Author: chenheng <chenh...@apache.org>
Authored: Fri Sep 9 10:54:48 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Fri Sep 9 14:19:48 2016 +0800

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 49 +++
 .../master/balancer/RegionLocationFinder.java   | 18 +--
 .../master/balancer/TestBaseLoadBalancer.java   | 51 +---
 .../balancer/TestRegionLocationFinder.java  |  3 +-
 4 files changed, 100 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f0223b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index c2529a8..0c86557 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -33,6 +33,7 @@ import java.util.NavigableMap;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
@@ -57,6 +58,7 @@ import com.google.common.base.Joiner;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.ListenableFuture;
 
 /**
 * The base class for load balancers. It provides the functions used by
@@ -115,6 +117,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 HRegionInfo[] regions;
 Deque[] regionLoads;
 private RegionLocationFinder regionFinder;
+ArrayList<ListenableFuture> regionLocationFutures;
 
 int[][] regionLocations; //regionIndex -> list of serverIndex sorted by 
locality
 
@@ -236,6 +239,13 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   regionIndexToTableIndex = new int[numRegions];
   regionIndexToPrimaryIndex = new int[numRegions];
   regionLoads = new Deque[numRegions];
+  regionLocationFutures = new 
ArrayList<ListenableFuture>(
+  numRegions);
+  if (regionFinder != null) {
+for (int i = 0; i < numRegions; i++) {
+  regionLocationFutures.add(null);
+}
+  }
   regionLocations = new int[numRegions][];
   serverIndicesSortedByRegionCount = new Integer[numServers];
   serverIndicesSortedByLocality = new Integer[numServers];
@@ -305,6 +315,33 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 regionIndex++;
   }
 
+  if (regionFinder != null) {
+for (int index = 0; index < regionLocationFutures.size(); index++) {
+  ListenableFuture future = 
regionLocationFutures
+  .get(index);
+  HDFSBlocksDistribution blockDistbn = null;
+  try {
+blockDistbn = future.get();
+  } catch (InterruptedException ite) {
+  } catch (ExecutionException ee) {
+LOG.debug(
+"IOException during HDFSBlocksDistribution computation. for 
region = "
++ regions[index].getEncodedName(), ee);
+  } finally {
+if (blockDistbn == null) {
+  blockDistbn = new HDFSBlocksDistribution();
+}
+  }
+  List loc = 
regionFinder.getTopBlockLocations(blockDistbn);
+  regionLocations[index] = new int[loc.size()];
+  for (int i = 0; i < loc.size(); i++) {
+regionLocations[index][i] = loc.get(i) == null ? -1
+: (serversToIndex.get(loc.get(i).getHostAndPort()) == null ? -1
+: serversToIndex.get(loc.get(i).getHostAndPort()));
+  }
+}
+  }
+
   for (int i = 0; i < serversPerHostList.size(); i++) {
 serversPerHost[i] = new int[serversPerHostList.get(i).size()];
 for (int j = 0; j < serversPerHost[i].length; j++) {
@@ -452,15 +489,9 @@ public abstract class BaseLoadBalancer implements 
Lo

hbase git commit: HBASE-16570 Compute region locality in parallel at startup (binlijin)

2016-09-09 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 52963b342 -> b4086795f


HBASE-16570 Compute region locality in parallel at startup (binlijin)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4086795
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4086795
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4086795

Branch: refs/heads/branch-1
Commit: b4086795f2dcb1497a367592850fa80f6514cde2
Parents: 52963b3
Author: chenheng <chenh...@apache.org>
Authored: Fri Sep 9 10:54:48 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Fri Sep 9 14:16:37 2016 +0800

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 49 +++
 .../master/balancer/RegionLocationFinder.java   | 18 +--
 .../master/balancer/TestBaseLoadBalancer.java   | 51 +---
 .../balancer/TestRegionLocationFinder.java  |  3 +-
 4 files changed, 100 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b4086795/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 0a61839..93b29b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -33,6 +33,7 @@ import java.util.NavigableMap;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
@@ -57,6 +58,7 @@ import com.google.common.base.Joiner;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.ListenableFuture;
 
 /**
 * The base class for load balancers. It provides the functions used by
@@ -115,6 +117,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 HRegionInfo[] regions;
 Deque[] regionLoads;
 private RegionLocationFinder regionFinder;
+ArrayList<ListenableFuture> regionLocationFutures;
 
 int[][] regionLocations; //regionIndex -> list of serverIndex sorted by 
locality
 
@@ -236,6 +239,13 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   regionIndexToTableIndex = new int[numRegions];
   regionIndexToPrimaryIndex = new int[numRegions];
   regionLoads = new Deque[numRegions];
+  regionLocationFutures = new 
ArrayList<ListenableFuture>(
+  numRegions);
+  if (regionFinder != null) {
+for (int i = 0; i < numRegions; i++) {
+  regionLocationFutures.add(null);
+}
+  }
   regionLocations = new int[numRegions][];
   serverIndicesSortedByRegionCount = new Integer[numServers];
   serverIndicesSortedByLocality = new Integer[numServers];
@@ -305,6 +315,33 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 regionIndex++;
   }
 
+  if (regionFinder != null) {
+for (int index = 0; index < regionLocationFutures.size(); index++) {
+  ListenableFuture future = 
regionLocationFutures
+  .get(index);
+  HDFSBlocksDistribution blockDistbn = null;
+  try {
+blockDistbn = future.get();
+  } catch (InterruptedException ite) {
+  } catch (ExecutionException ee) {
+LOG.debug(
+"IOException during HDFSBlocksDistribution computation. for 
region = "
++ regions[index].getEncodedName(), ee);
+  } finally {
+if (blockDistbn == null) {
+  blockDistbn = new HDFSBlocksDistribution();
+}
+  }
+  List loc = 
regionFinder.getTopBlockLocations(blockDistbn);
+  regionLocations[index] = new int[loc.size()];
+  for (int i = 0; i < loc.size(); i++) {
+regionLocations[index][i] = loc.get(i) == null ? -1
+: (serversToIndex.get(loc.get(i).getHostAndPort()) == null ? -1
+: serversToIndex.get(loc.get(i).getHostAndPort()));
+  }
+}
+  }
+
   for (int i = 0; i < serversPerHostList.size(); i++) {
 serversPerHost[i] = new int[serversPerHostList.get(i).size()];
 for (int j = 0; j < serversPerHost[i].length; j++) {
@@ -452,15 +489,9 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {

hbase git commit: HBASE-16570 Compute region locality in parallel at startup (binlijin)

2016-09-09 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 46c756a4a -> e11aafae9


HBASE-16570 Compute region locality in parallel at startup (binlijin)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e11aafae
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e11aafae
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e11aafae

Branch: refs/heads/master
Commit: e11aafae957bc8d71cb622833011f29325049987
Parents: 46c756a
Author: chenheng <chenh...@apache.org>
Authored: Fri Sep 9 10:54:48 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Fri Sep 9 10:54:48 2016 +0800

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 49 +++
 .../master/balancer/RegionLocationFinder.java   | 18 +--
 .../master/balancer/TestBaseLoadBalancer.java   | 51 +---
 .../balancer/TestRegionLocationFinder.java  |  3 +-
 4 files changed, 100 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e11aafae/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index dc5bace..2b13b21 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -33,6 +33,7 @@ import java.util.NavigableMap;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
@@ -59,6 +60,7 @@ import com.google.common.base.Joiner;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.ListenableFuture;
 
 /**
  * The base class for load balancers. It provides the the functions used to by
@@ -117,6 +119,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 HRegionInfo[] regions;
 Deque[] regionLoads;
 private RegionLocationFinder regionFinder;
+ArrayList<ListenableFuture<HDFSBlocksDistribution>> regionLocationFutures;
 
 int[][] regionLocations; //regionIndex -> list of serverIndex sorted by 
locality
 
@@ -238,6 +241,13 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   regionIndexToTableIndex = new int[numRegions];
   regionIndexToPrimaryIndex = new int[numRegions];
   regionLoads = new Deque[numRegions];
+  regionLocationFutures = new 
ArrayList<ListenableFuture<HDFSBlocksDistribution>>(
+  numRegions);
+  if (regionFinder != null) {
+for (int i = 0; i < numRegions; i++) {
+  regionLocationFutures.add(null);
+}
+  }
   regionLocations = new int[numRegions][];
   serverIndicesSortedByRegionCount = new Integer[numServers];
   serverIndicesSortedByLocality = new Integer[numServers];
@@ -307,6 +317,33 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 regionIndex++;
   }
 
+  if (regionFinder != null) {
+for (int index = 0; index < regionLocationFutures.size(); index++) {
+  ListenableFuture<HDFSBlocksDistribution> future = 
regionLocationFutures
+  .get(index);
+  HDFSBlocksDistribution blockDistbn = null;
+  try {
+blockDistbn = future.get();
+  } catch (InterruptedException ite) {
+  } catch (ExecutionException ee) {
+LOG.debug(
+"IOException during HDFSBlocksDistribution computation. for 
region = "
++ regions[index].getEncodedName(), ee);
+  } finally {
+if (blockDistbn == null) {
+  blockDistbn = new HDFSBlocksDistribution();
+}
+  }
+  List<ServerName> loc = 
regionFinder.getTopBlockLocations(blockDistbn);
+  regionLocations[index] = new int[loc.size()];
+  for (int i = 0; i < loc.size(); i++) {
+regionLocations[index][i] = loc.get(i) == null ? -1
+: (serversToIndex.get(loc.get(i).getHostAndPort()) == null ? -1
+: serversToIndex.get(loc.get(i).getHostAndPort()));
+  }
+}
+  }
+
   for (int i = 0; i < serversPerHostList.size(); i++) {
 serversPerHost[i] = new int[serversPerHostList.get(i).size()];
 for (int j = 0; j < serversPerHost[i].length; j++) {
@@ -454,15 +491,9 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {

hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured

2016-09-07 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/0.98 daf57832d -> 66a82af9f


HBASE-16562 ITBLL should fail to start if misconfigured


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/66a82af9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/66a82af9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/66a82af9

Branch: refs/heads/0.98
Commit: 66a82af9f47230b1972a32bea50beb785fb7997a
Parents: daf5783
Author: chenheng <chenh...@apache.org>
Authored: Tue Sep 6 11:02:18 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 7 16:33:32 2016 +0800

--
 .../test/IntegrationTestBigLinkedList.java  | 34 ++--
 1 file changed, 24 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/66a82af9/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index d0df043..8f7539b 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -218,6 +218,11 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 
 private static final Log LOG = LogFactory.getLog(Generator.class);
 
+public static final String USAGE =  "Usage : " + 
Generator.class.getSimpleName() +
+"[ 
 \n" +
+"where  should be a multiple of width*wrap 
multiplier, " +
+"25M by default \n";
+
 static class GeneratorInputFormat extends 
InputFormat<BytesWritable,NullWritable> {
   static class GeneratorInputSplit extends InputSplit implements Writable {
 @Override
@@ -434,21 +439,20 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 @Override
 public int run(String[] args) throws Exception {
   if (args.length < 3) {
-System.out.println("Usage : " + Generator.class.getSimpleName() +
-"[ 
]");
-System.out.println("   where  should be a multiple 
of " +
-" width*wrap multiplier, 25M by default");
-return 0;
+System.err.println(USAGE);
+return 1;
   }
 
   int numMappers = Integer.parseInt(args[0]);
   long numNodes = Long.parseLong(args[1]);
   Path tmpOutput = new Path(args[2]);
   Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
-  Integer wrapMuplitplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
-  return run(numMappers, numNodes, tmpOutput, width, wrapMuplitplier);
+  Integer wrapMultiplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
+  return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier);
 }
 
+
+
 protected void createSchema() throws IOException {
   Configuration conf = getConf();
   HBaseAdmin admin = new HBaseAdmin(conf);
@@ -549,12 +553,22 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 }
 
 public int run(int numMappers, long numNodes, Path tmpOutput,
-Integer width, Integer wrapMuplitplier) throws Exception {
-  int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMuplitplier);
+Integer width, Integer wrapMultiplier) throws Exception {
+  long wrap = (long)width*wrapMultiplier;
+  if (wrap < numNodes && numNodes % wrap != 0) {
+/**
+ *  numNodes should be a multiple of width*wrapMultiplier.
+ *  If numNodes less than wrap, wrap will be set to be equal with 
numNodes,
+ *  See {@link GeneratorMapper#setup(Mapper.Context)}
+ * */
+System.err.println(USAGE);
+return 1;
+  }
+  int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMultiplier);
   if (ret > 0) {
 return ret;
   }
-  return runGenerator(numMappers, numNodes, tmpOutput, width, 
wrapMuplitplier);
+  return runGenerator(numMappers, numNodes, tmpOutput, width, 
wrapMultiplier);
 }
   }
 



hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured

2016-09-07 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 fba13a6ef -> a55842a0a


HBASE-16562 ITBLL should fail to start if misconfigured


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a55842a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a55842a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a55842a0

Branch: refs/heads/branch-1.0
Commit: a55842a0a86040545eff6692317191acb84032ae
Parents: fba13a6
Author: chenheng <chenh...@apache.org>
Authored: Tue Sep 6 11:02:18 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 7 16:04:18 2016 +0800

--
 .../test/IntegrationTestBigLinkedList.java  | 34 ++--
 1 file changed, 24 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a55842a0/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 99be272..b0c5371 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -239,6 +239,11 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 
 private static final Log LOG = LogFactory.getLog(Generator.class);
 
+public static final String USAGE =  "Usage : " + 
Generator.class.getSimpleName() +
+"[ 
 \n" +
+"where  should be a multiple of width*wrap 
multiplier, " +
+"25M by default \n";
+
 static class GeneratorInputFormat extends 
InputFormat<BytesWritable,NullWritable> {
   static class GeneratorInputSplit extends InputSplit implements Writable {
 @Override
@@ -461,21 +466,20 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 @Override
 public int run(String[] args) throws Exception {
   if (args.length < 3) {
-System.out.println("Usage : " + Generator.class.getSimpleName() +
-"[ 
]");
-System.out.println("   where  should be a multiple 
of " +
-" width*wrap multiplier, 25M by default");
-return 0;
+System.err.println(USAGE);
+return 1;
   }
 
   int numMappers = Integer.parseInt(args[0]);
   long numNodes = Long.parseLong(args[1]);
   Path tmpOutput = new Path(args[2]);
   Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
-  Integer wrapMuplitplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
-  return run(numMappers, numNodes, tmpOutput, width, wrapMuplitplier);
+  Integer wrapMultiplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
+  return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier);
 }
 
+
+
 protected void createSchema() throws IOException {
   Configuration conf = getConf();
   Admin admin = new HBaseAdmin(conf);
@@ -575,12 +579,22 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 }
 
 public int run(int numMappers, long numNodes, Path tmpOutput,
-Integer width, Integer wrapMuplitplier) throws Exception {
-  int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMuplitplier);
+Integer width, Integer wrapMultiplier) throws Exception {
+  long wrap = (long)width*wrapMultiplier;
+  if (wrap < numNodes && numNodes % wrap != 0) {
+/**
+ *  numNodes should be a multiple of width*wrapMultiplier.
+ *  If numNodes less than wrap, wrap will be set to be equal with 
numNodes,
+ *  See {@link GeneratorMapper#setup(Mapper.Context)}
+ * */
+System.err.println(USAGE);
+return 1;
+  }
+  int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMultiplier);
   if (ret > 0) {
 return ret;
   }
-  return runGenerator(numMappers, numNodes, tmpOutput, width, 
wrapMuplitplier);
+  return runGenerator(numMappers, numNodes, tmpOutput, width, 
wrapMultiplier);
 }
   }
 



hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured

2016-09-07 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 6f747178f -> 6e076d940


HBASE-16562 ITBLL should fail to start if misconfigured


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6e076d94
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6e076d94
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6e076d94

Branch: refs/heads/branch-1.1
Commit: 6e076d940f8882d6ac096f6bb2ebdf05feeb4218
Parents: 6f74717
Author: chenheng <chenh...@apache.org>
Authored: Tue Sep 6 11:02:18 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 7 15:56:30 2016 +0800

--
 .../test/IntegrationTestBigLinkedList.java  | 35 ++--
 1 file changed, 25 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6e076d94/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index f3d8360..a06e91d 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -116,6 +116,7 @@ import 
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.jruby.RubyProcess;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -257,6 +258,11 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 public static final String MULTIPLE_UNEVEN_COLUMNFAMILIES_KEY =
 "generator.multiple.columnfamilies";
 
+public static final String USAGE =  "Usage : " + 
Generator.class.getSimpleName() +
+"[ 
 \n" +
+"where  should be a multiple of width*wrap 
multiplier, " +
+"25M by default \n";
+
 static class GeneratorInputFormat extends 
InputFormat<BytesWritable,NullWritable> {
   static class GeneratorInputSplit extends InputSplit implements Writable {
 @Override
@@ -499,21 +505,20 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 @Override
 public int run(String[] args) throws Exception {
   if (args.length < 3) {
-System.out.println("Usage : " + Generator.class.getSimpleName() +
-"[ 
]");
-System.out.println("   where  should be a multiple 
of " +
-" width*wrap multiplier, 25M by default");
-return 0;
+System.err.println(USAGE);
+return 1;
   }
 
   int numMappers = Integer.parseInt(args[0]);
   long numNodes = Long.parseLong(args[1]);
   Path tmpOutput = new Path(args[2]);
   Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
-  Integer wrapMuplitplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
-  return run(numMappers, numNodes, tmpOutput, width, wrapMuplitplier);
+  Integer wrapMultiplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
+  return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier);
 }
 
+
+
 protected void createSchema() throws IOException {
   Configuration conf = getConf();
   TableName tableName = getTableName(conf);
@@ -619,12 +624,22 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 }
 
 public int run(int numMappers, long numNodes, Path tmpOutput,
-Integer width, Integer wrapMuplitplier) throws Exception {
-  int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMuplitplier);
+Integer width, Integer wrapMultiplier) throws Exception {
+  long wrap = (long)width*wrapMultiplier;
+  if (wrap < numNodes && numNodes % wrap != 0) {
+/**
+ *  numNodes should be a multiple of width*wrapMultiplier.
+ *  If numNodes less than wrap, wrap will be set to be equal with 
numNodes,
+ *  See {@link GeneratorMapper#setup(Mapper.Context)}
+ * */
+System.err.println(USAGE);
+return 1;
+  }
+  int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMultiplier);
   if (ret > 0) {
 return ret;
   }
-  return runGenerator(numMappers, numNodes, tmpOutput, width, 
wrapMuplitplier);
+  return runGenerator(numMappers, numNodes, tmpOutput, width, 
wrapMultiplier);
 }
   }
 



hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured

2016-09-07 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 0bdcfd90e -> a13f31aa7


HBASE-16562 ITBLL should fail to start if misconfigured


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a13f31aa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a13f31aa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a13f31aa

Branch: refs/heads/branch-1.2
Commit: a13f31aa7e83fd7b5ae6186f8ca9899427d0ef0d
Parents: 0bdcfd9
Author: chenheng <chenh...@apache.org>
Authored: Tue Sep 6 11:02:18 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 7 15:03:54 2016 +0800

--
 .../test/IntegrationTestBigLinkedList.java  | 35 ++--
 1 file changed, 25 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a13f31aa/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 7f0f732..880b13f 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -116,6 +116,7 @@ import 
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.jruby.RubyProcess;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -257,6 +258,11 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 public static final String MULTIPLE_UNEVEN_COLUMNFAMILIES_KEY =
 "generator.multiple.columnfamilies";
 
+public static final String USAGE =  "Usage : " + 
Generator.class.getSimpleName() +
+"[ 
 \n" +
+"where  should be a multiple of width*wrap 
multiplier, " +
+"25M by default \n";
+
 static class GeneratorInputFormat extends 
InputFormat<BytesWritable,NullWritable> {
   static class GeneratorInputSplit extends InputSplit implements Writable {
 @Override
@@ -498,21 +504,20 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 @Override
 public int run(String[] args) throws Exception {
   if (args.length < 3) {
-System.out.println("Usage : " + Generator.class.getSimpleName() +
-"[ 
]");
-System.out.println("   where  should be a multiple 
of " +
-" width*wrap multiplier, 25M by default");
-return 0;
+System.err.println(USAGE);
+return 1;
   }
 
   int numMappers = Integer.parseInt(args[0]);
   long numNodes = Long.parseLong(args[1]);
   Path tmpOutput = new Path(args[2]);
   Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
-  Integer wrapMuplitplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
-  return run(numMappers, numNodes, tmpOutput, width, wrapMuplitplier);
+  Integer wrapMultiplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
+  return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier);
 }
 
+
+
 protected void createSchema() throws IOException {
   Configuration conf = getConf();
   TableName tableName = getTableName(conf);
@@ -624,12 +629,22 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 }
 
 public int run(int numMappers, long numNodes, Path tmpOutput,
-Integer width, Integer wrapMuplitplier) throws Exception {
-  int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMuplitplier);
+Integer width, Integer wrapMultiplier) throws Exception {
+  long wrap = (long)width*wrapMultiplier;
+  if (wrap < numNodes && numNodes % wrap != 0) {
+/**
+ *  numNodes should be a multiple of width*wrapMultiplier.
+ *  If numNodes less than wrap, wrap will be set to be equal with 
numNodes,
+ *  See {@link GeneratorMapper#setup(Mapper.Context)}
+ * */
+System.err.println(USAGE);
+return 1;
+  }
+  int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMultiplier);
   if (ret > 0) {
 return ret;
   }
-  return runGenerator(numMappers, numNodes, tmpOutput, width, 
wrapMuplitplier);
+  return runGenerator(numMappers, numNodes, tmpOutput, width, 
wrapMultiplier);
 }
   }
 



hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured, addendum

2016-09-07 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 046123ff1 -> f7b4ecb46


HBASE-16562 ITBLL should fail to start if misconfigured, addendum


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f7b4ecb4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f7b4ecb4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f7b4ecb4

Branch: refs/heads/branch-1.3
Commit: f7b4ecb4685937e9a12b07e2677ed4f59f4dfd7d
Parents: 046123f
Author: chenheng <chenh...@apache.org>
Authored: Wed Sep 7 15:29:55 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 7 15:49:02 2016 +0800

--
 .../test/IntegrationTestBigLinkedList.java  | 21 ++--
 1 file changed, 10 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f7b4ecb4/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 366fc02..87ac1f7 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -678,17 +678,6 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
 Integer wrapMultiplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
 Integer numWalkers = (args.length < 6) ? null : 
Integer.parseInt(args[5]);
-
-long wrap = (long)width*wrapMultiplier;
-if (wrap < numNodes && numNodes % wrap != 0) {
-  /**
-   *  numNodes should be a multiple of width*wrapMultiplier.
-   *  If numNodes less than wrap, wrap will be set to be equal with 
numNodes,
-   *  See {@link GeneratorMapper#setup(Mapper.Context)}
-   * */
-  System.err.println(USAGE);
-  return 1;
-}
 return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier, 
numWalkers);
   } catch (NumberFormatException e) {
 System.err.println("Parsing generator arguments failed: " + 
e.getMessage());
@@ -813,6 +802,16 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 public int run(int numMappers, long numNodes, Path tmpOutput,
 Integer width, Integer wrapMultiplier, Integer numWalkers)
 throws Exception {
+  long wrap = (long)width*wrapMultiplier;
+  if (wrap < numNodes && numNodes % wrap != 0) {
+/**
+ *  numNodes should be a multiple of width*wrapMultiplier.
+ *  If numNodes less than wrap, wrap will be set to be equal with 
numNodes,
+ *  See {@link GeneratorMapper#setup(Mapper.Context)}
+ * */
+System.err.println(USAGE);
+return 1;
+  }
   int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMultiplier,
   numWalkers);
   if (ret > 0) {



hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured, addendum

2016-09-07 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 38b946c27 -> 6f73ef2df


HBASE-16562 ITBLL should fail to start if misconfigured, addendum


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6f73ef2d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6f73ef2d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6f73ef2d

Branch: refs/heads/branch-1
Commit: 6f73ef2dff46692ede976621f3e4b9e5cfae01fa
Parents: 38b946c
Author: chenheng <chenh...@apache.org>
Authored: Wed Sep 7 15:29:55 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 7 15:45:09 2016 +0800

--
 .../test/IntegrationTestBigLinkedList.java  | 21 ++--
 1 file changed, 10 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6f73ef2d/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 366fc02..87ac1f7 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -678,17 +678,6 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
 Integer wrapMultiplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
 Integer numWalkers = (args.length < 6) ? null : 
Integer.parseInt(args[5]);
-
-long wrap = (long)width*wrapMultiplier;
-if (wrap < numNodes && numNodes % wrap != 0) {
-  /**
-   *  numNodes should be a multiple of width*wrapMultiplier.
-   *  If numNodes less than wrap, wrap will be set to be equal with 
numNodes,
-   *  See {@link GeneratorMapper#setup(Mapper.Context)}
-   * */
-  System.err.println(USAGE);
-  return 1;
-}
 return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier, 
numWalkers);
   } catch (NumberFormatException e) {
 System.err.println("Parsing generator arguments failed: " + 
e.getMessage());
@@ -813,6 +802,16 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 public int run(int numMappers, long numNodes, Path tmpOutput,
 Integer width, Integer wrapMultiplier, Integer numWalkers)
 throws Exception {
+  long wrap = (long)width*wrapMultiplier;
+  if (wrap < numNodes && numNodes % wrap != 0) {
+/**
+ *  numNodes should be a multiple of width*wrapMultiplier.
+ *  If numNodes less than wrap, wrap will be set to be equal with 
numNodes,
+ *  See {@link GeneratorMapper#setup(Mapper.Context)}
+ * */
+System.err.println(USAGE);
+return 1;
+  }
   int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMultiplier,
   numWalkers);
   if (ret > 0) {



hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured, addendum

2016-09-07 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 78af20944 -> 7363a7666


HBASE-16562 ITBLL should fail to start if misconfigured, addendum


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7363a766
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7363a766
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7363a766

Branch: refs/heads/master
Commit: 7363a76660583492c5c037113ac06cfef92e034e
Parents: 78af209
Author: chenheng <chenh...@apache.org>
Authored: Wed Sep 7 15:29:55 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 7 15:29:55 2016 +0800

--
 .../test/IntegrationTestBigLinkedList.java  | 21 ++--
 1 file changed, 10 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7363a766/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 41aa128..07c5cf2 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -676,17 +676,6 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
 Integer wrapMultiplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
 Integer numWalkers = (args.length < 6) ? null : 
Integer.parseInt(args[5]);
-
-long wrap = (long)width*wrapMultiplier;
-if (wrap < numNodes && numNodes % wrap != 0) {
-  /**
-   *  numNodes should be a multiple of width*wrapMultiplier.
-   *  If numNodes less than wrap, wrap will be set to be equal with 
numNodes,
-   *  See {@link GeneratorMapper#setup(Mapper.Context)}
-   * */
-  System.err.println(USAGE);
-  return 1;
-}
 return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier, 
numWalkers);
   } catch (NumberFormatException e) {
 System.err.println("Parsing generator arguments failed: " + 
e.getMessage());
@@ -818,6 +807,16 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 public int run(int numMappers, long numNodes, Path tmpOutput,
 Integer width, Integer wrapMultiplier, Integer numWalkers)
 throws Exception {
+  long wrap = (long)width*wrapMultiplier;
+  if (wrap < numNodes && numNodes % wrap != 0) {
+/**
+ *  numNodes should be a multiple of width*wrapMultiplier.
+ *  If numNodes less than wrap, wrap will be set to be equal with 
numNodes,
+ *  See {@link GeneratorMapper#setup(Mapper.Context)}
+ * */
+System.err.println(USAGE);
+return 1;
+  }
   int ret = runRandomInputGenerator(numMappers, numNodes, tmpOutput, 
width, wrapMultiplier,
   numWalkers);
   if (ret > 0) {



hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured

2016-09-07 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 2530258c1 -> 046123ff1


HBASE-16562 ITBLL should fail to start if misconfigured


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/046123ff
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/046123ff
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/046123ff

Branch: refs/heads/branch-1.3
Commit: 046123ff14586e6d6d1ec562cd6064ee9ab10edb
Parents: 2530258
Author: chenheng <chenh...@apache.org>
Authored: Tue Sep 6 11:02:18 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 7 13:58:36 2016 +0800

--
 .../hadoop/hbase/test/IntegrationTestBigLinkedList.java  | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/046123ff/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 2d828f4..366fc02 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -678,6 +678,17 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
 Integer wrapMultiplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
 Integer numWalkers = (args.length < 6) ? null : 
Integer.parseInt(args[5]);
+
+long wrap = (long)width*wrapMultiplier;
+if (wrap < numNodes && numNodes % wrap != 0) {
+  /**
+   *  numNodes should be a multiple of width*wrapMultiplier.
+   *  If numNodes is less than wrap, wrap will be set equal to numNodes;
+   *  see {@link GeneratorMapper#setup(Mapper.Context)}.
+   */
+  System.err.println(USAGE);
+  return 1;
+}
 return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier, 
numWalkers);
   } catch (NumberFormatException e) {
 System.err.println("Parsing generator arguments failed: " + 
e.getMessage());



hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured

2016-09-06 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 b694b63ed -> 38b946c27


HBASE-16562 ITBLL should fail to start if misconfigured


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/38b946c2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/38b946c2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/38b946c2

Branch: refs/heads/branch-1
Commit: 38b946c2762d10f547de3e6c2b0676d3f7dfbd54
Parents: b694b63
Author: chenheng <chenh...@apache.org>
Authored: Tue Sep 6 11:02:18 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 7 13:50:36 2016 +0800

--
 .../hadoop/hbase/test/IntegrationTestBigLinkedList.java  | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/38b946c2/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 2d828f4..366fc02 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -678,6 +678,17 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
 Integer wrapMultiplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
 Integer numWalkers = (args.length < 6) ? null : 
Integer.parseInt(args[5]);
+
+long wrap = (long)width*wrapMultiplier;
+if (wrap < numNodes && numNodes % wrap != 0) {
+  /**
+   *  numNodes should be a multiple of width*wrapMultiplier.
+   *  If numNodes is less than wrap, wrap will be set equal to numNodes;
+   *  see {@link GeneratorMapper#setup(Mapper.Context)}.
+   */
+  System.err.println(USAGE);
+  return 1;
+}
 return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier, 
numWalkers);
   } catch (NumberFormatException e) {
 System.err.println("Parsing generator arguments failed: " + 
e.getMessage());



hbase git commit: HBASE-16562 ITBLL should fail to start if misconfigured

2016-09-06 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master b6ba13c37 -> 78af20944


HBASE-16562 ITBLL should fail to start if misconfigured


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/78af2094
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/78af2094
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/78af2094

Branch: refs/heads/master
Commit: 78af20944cbe2a65148da1457f1de87cff884858
Parents: b6ba13c
Author: chenheng <chenh...@apache.org>
Authored: Tue Sep 6 11:02:18 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Sep 7 11:29:54 2016 +0800

--
 .../hadoop/hbase/test/IntegrationTestBigLinkedList.java  | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/78af2094/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 135bea7..41aa128 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -676,6 +676,17 @@ public class IntegrationTestBigLinkedList extends 
IntegrationTestBase {
 Integer width = (args.length < 4) ? null : Integer.parseInt(args[3]);
 Integer wrapMultiplier = (args.length < 5) ? null : 
Integer.parseInt(args[4]);
 Integer numWalkers = (args.length < 6) ? null : 
Integer.parseInt(args[5]);
+
+long wrap = (long)width*wrapMultiplier;
+if (wrap < numNodes && numNodes % wrap != 0) {
+  /**
+   *  numNodes should be a multiple of width*wrapMultiplier.
+   *  If numNodes is less than wrap, wrap will be set equal to numNodes;
+   *  see {@link GeneratorMapper#setup(Mapper.Context)}.
+   */
+  System.err.println(USAGE);
+  return 1;
+}
 return run(numMappers, numNodes, tmpOutput, width, wrapMultiplier, 
numWalkers);
   } catch (NumberFormatException e) {
 System.err.println("Parsing generator arguments failed: " + 
e.getMessage());



hbase git commit: HBASE-15278 AsyncRPCClient hangs if Connection closes before RPC call response

2016-08-31 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 847955427 -> 389255048


HBASE-15278 AsyncRPCClient hangs if Connection closes before RPC call response


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/38925504
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/38925504
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/38925504

Branch: refs/heads/master
Commit: 3892550484c110e53236a6b5c337ce5ef96ee194
Parents: 8479554
Author: chenheng <chenh...@apache.org>
Authored: Thu Sep 1 10:33:24 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Sep 1 10:33:24 2016 +0800

--
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  2 +-
 .../hadoop/hbase/ipc/AbstractTestIPC.java   | 55 
 2 files changed, 56 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/38925504/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 759da82..c787c98 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -2677,7 +2677,7 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 }
 
 Connection register(SocketChannel channel) {
-  Connection connection = new Connection(channel, 
System.currentTimeMillis());
+  Connection connection = getConnection(channel, 
System.currentTimeMillis());
   add(connection);
   if (LOG.isDebugEnabled()) {
 LOG.debug("Server connection from " + connection +

http://git-wip-us.apache.org/repos/asf/hbase/blob/38925504/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
index 771ef93..2e26e13 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
@@ -35,6 +35,8 @@ import com.google.protobuf.ServiceException;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.nio.channels.SocketChannel;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -287,4 +289,57 @@ public abstract class AbstractTestIPC {
   rpcServer.stop();
 }
   }
+
+  static class TestFailingRpcServer extends TestRpcServer {
+
+TestFailingRpcServer() throws IOException {
+  this(new FifoRpcScheduler(CONF, 1), CONF);
+}
+
+TestFailingRpcServer(Configuration conf) throws IOException {
+  this(new FifoRpcScheduler(conf, 1), conf);
+}
+
+TestFailingRpcServer(RpcScheduler scheduler, Configuration conf) throws 
IOException {
+  super(scheduler, conf);
+}
+
+class FailingConnection extends Connection {
+  public FailingConnection(SocketChannel channel, long lastContact) {
+  super(channel, lastContact);
+  }
+
+  @Override
+  protected void processRequest(ByteBuffer buf) throws IOException, 
InterruptedException {
+// this will throw exception after the connection header is read, and 
an RPC is sent
+// from client
+throw new DoNotRetryIOException("Failing for test");
+  }
+}
+
+@Override
+protected Connection getConnection(SocketChannel channel, long time) {
+return new FailingConnection(channel, time);
+}
+  }
+
+  /** Tests that the connection closing is handled by the client with 
outstanding RPC calls */
+  @Test (timeout = 30000)
+  public void testConnectionCloseWithOutstandingRPCs() throws 
InterruptedException, IOException {
+Configuration conf = new Configuration(CONF);
+RpcServer rpcServer = new TestFailingRpcServer(conf);
+try (AbstractRpcClient client = createRpcClient(conf)) {
+  rpcServer.start();
+  BlockingInterface stub = newBlockingStub(client, 
rpcServer.getListenerAddress());
+  EchoRequestProto param = 
EchoRequestProto.newBuilder().setMessage("hello").build();
+  stub.echo(
+  new 
PayloadCarryingRpcController(CellUtil.createCellScanner(ImmutableList. 
of(CELL))),
+  param);
+  fail("RPC should have failed because connection closed");
+} catch (ServiceException e) {
+  LOG.info("Caught expected exception: " + e.toString());
+} finally {
+  rpcServer.stop();
+}
+  }
 }



hbase git commit: HBASE-16224 Reduce the number of RPCs for the large PUTs (ChiaPing Tsai)

2016-08-29 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 8013459da -> ce9b6c0ab


HBASE-16224 Reduce the number of RPCs for the large PUTs (ChiaPing Tsai)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ce9b6c0a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ce9b6c0a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ce9b6c0a

Branch: refs/heads/branch-1
Commit: ce9b6c0abfc2d8bb2a9d22a4ed4c5508d0567987
Parents: 8013459
Author: chenheng <chenh...@apache.org>
Authored: Tue Aug 30 13:30:23 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Tue Aug 30 13:30:23 2016 +0800

--
 .../hadoop/hbase/client/AsyncProcess.java   | 503 +++
 .../hbase/client/BufferedMutatorImpl.java   | 151 --
 .../apache/hadoop/hbase/client/RowAccess.java   |  44 ++
 .../hadoop/hbase/client/TestAsyncProcess.java   | 412 ++-
 .../coprocessor/MultiRowMutationEndpoint.java   |   2 +-
 5 files changed, 968 insertions(+), 144 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ce9b6c0a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 780de18..aa3ffc1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -19,17 +19,22 @@
 
 package org.apache.hadoop.hbase.client;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
@@ -38,30 +43,29 @@ import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.RetryImmediatelyException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.AsyncProcess.RowChecker.ReturnCode;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.RetryImmediatelyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.htrace.Trace;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * This class  allows a continuous flow of requests. It's written to be 
compatible with a
  * synchronous caller such as HTable.
@@ -126,6 +130,25 @@ class AsyncProcess {
   private final int THRESHOLD_TO_LOG_REGION_DETAILS = 2;
 
   /**
+   * The maximum size of single RegionServer.
+   */
+  public static final String HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = 
"hbase.client.max.perrequest.heapsize";
+
+  /**
+   * Default value of {@link #HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE}.
+   */
+  public static final long DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = 
4194304;
+
+  /**
+   * The maximum size of submit.
+   */
+  public static final String HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = 
"hbase.client.max.submit.heapsize";
+  /**
+   * Default value of {@link #HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE}.
+   */
+  public static final long DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = 
DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE;
+
+ 

hbase git commit: HBASE-16224 Reduce the number of RPCs for the large PUTs (ChiaPing Tsai)

2016-08-29 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master de5a3a006 -> c000f29e4


HBASE-16224 Reduce the number of RPCs for the large PUTs (ChiaPing Tsai)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c000f29e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c000f29e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c000f29e

Branch: refs/heads/master
Commit: c000f29e47b9a6a7ffa71848f0df3d5c1a72312e
Parents: de5a3a0
Author: chenheng <chenh...@apache.org>
Authored: Tue Aug 30 06:35:33 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Tue Aug 30 06:35:33 2016 +0800

--
 .../hadoop/hbase/client/AsyncProcess.java   | 528 +++
 .../hbase/client/BufferedMutatorImpl.java   | 151 --
 .../apache/hadoop/hbase/client/RowAccess.java   |  44 ++
 .../hadoop/hbase/client/TestAsyncProcess.java   | 430 ++-
 .../coprocessor/MultiRowMutationEndpoint.java   |   2 +-
 5 files changed, 997 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c000f29e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index d699233..045885f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -19,17 +19,13 @@
 
 package org.apache.hadoop.hbase.client;
 
-import java.io.IOException;
+import com.google.common.annotations.VisibleForTesting;
 import java.io.InterruptedIOException;
+import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
@@ -38,31 +34,39 @@ import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
-
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.RetryImmediatelyException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.AsyncProcess.RowChecker.ReturnCode;
+import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.RetryImmediatelyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.htrace.Trace;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * This class  allows a continuous flow of requests. It's written to be 
compatible with a
  * synchronous caller such as HTable.
@@ -127,6 +131,25 @@ class AsyncProcess {
   private final int THRESHOLD_TO_LOG_REGION_DETAILS = 2;
 
   /**
+   * The maximum size of single RegionServer.
+   */
+  public static final String HBASE_CLIENT_MAX_PERR

hbase git commit: HBASE-16490 Fix race condition between SnapshotManager and SnapshotCleaner

2016-08-26 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 1b9af2a2f -> 7eaba369e


HBASE-16490 Fix race condition between SnapshotManager and SnapshotCleaner


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7eaba369
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7eaba369
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7eaba369

Branch: refs/heads/master
Commit: 7eaba369e745f8095204a39a037c1d96ae7deac6
Parents: 1b9af2a
Author: chenheng <chenh...@apache.org>
Authored: Sat Aug 27 10:36:21 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sat Aug 27 10:48:21 2016 +0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  5 +++-
 .../master/cleaner/BaseFileCleanerDelegate.java |  7 +
 .../hbase/master/cleaner/CleanerChore.java  | 29 +---
 .../master/cleaner/FileCleanerDelegate.java |  8 ++
 .../hbase/master/cleaner/HFileCleaner.java  | 12 ++--
 .../snapshot/DisabledTableSnapshotHandler.java  |  4 +--
 .../snapshot/EnabledTableSnapshotHandler.java   |  2 +-
 .../master/snapshot/SnapshotFileCache.java  | 28 ---
 .../master/snapshot/SnapshotHFileCleaner.java   | 15 +-
 .../hbase/master/snapshot/SnapshotManager.java  | 22 ++-
 .../master/snapshot/TakeSnapshotHandler.java| 10 +--
 .../master/snapshot/TestSnapshotFileCache.java  | 11 
 .../snapshot/TestSnapshotHFileCleaner.java  | 19 ++---
 13 files changed, 127 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7eaba369/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 5b3984b..f4c2c1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -35,6 +35,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -973,8 +974,10 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 
//start the hfile archive cleaner thread
 Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
+Map<String, Object> params = new HashMap<String, Object>();
+params.put(MASTER, this);
 this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, 
getMasterFileSystem()
-.getFileSystem(), archiveDir);
+.getFileSystem(), archiveDir, params);
 getChoreService().scheduleChore(hfileCleaner);
 serviceStarted = true;
 if (LOG.isTraceEnabled()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7eaba369/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
index c6955d0..891db22 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.BaseConfigurable;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 
+import java.util.Map;
+
 /**
  * Base class for file cleaners which allows subclasses to implement a simple
  * isFileDeletable method (which used to be the FileCleanerDelegate contract).
@@ -39,6 +41,11 @@ implements FileCleanerDelegate {
   }});
   }
 
+  @Override
+  public void init(Map<String, Object> params) {
+// subclass could override it if needed.
+  }
+
   /**
* Should the master delete the file or keep it?
* @param fStat file status of the file to check

http://git-wip-us.apache.org/repos/asf/hbase/blob/7eaba369/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
index 5a93a6d..b094507 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ 
b/hbase-server/src/main/java/org/a

hbase git commit: HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot under tmp dir

2016-08-23 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 8eca263c4 -> bb7a054ec


HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot 
under tmp dir


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bb7a054e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bb7a054e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bb7a054e

Branch: refs/heads/branch-1.1
Commit: bb7a054ec69cc198dc9605928c9cc7ea0aa81418
Parents: 8eca263
Author: chenheng <chenh...@apache.org>
Authored: Mon Aug 22 16:45:20 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Aug 24 13:51:08 2016 +0800

--
 .../master/snapshot/SnapshotFileCache.java  | 17 -
 .../master/snapshot/TakeSnapshotHandler.java|  2 +-
 .../snapshot/SnapshotDescriptionUtils.java  | 14 ++
 .../hbase/snapshot/SnapshotManifestV2.java  | 18 --
 .../snapshot/TestSnapshotHFileCleaner.java  | 20 
 .../hbase/snapshot/SnapshotTestingUtils.java| 12 
 6 files changed, 75 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bb7a054e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index dfd3cb5..5b367c5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 
@@ -300,7 +301,21 @@ public class SnapshotFileCache implements Stoppable {
 FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
 if (running != null) {
   for (FileStatus run : running) {
-
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+try {
+  
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+} catch (CorruptedSnapshotException e) {
+  // See HBASE-16464
+  if (e.getCause() instanceof FileNotFoundException) {
+// If the snapshot is not in progress, we will delete it
+if (!fs.exists(new Path(run.getPath(),
+  SnapshotDescriptionUtils.SNAPSHOT_IN_PROGRESS))) {
+  fs.delete(run.getPath(), true);
+  LOG.warn("delete the " + run.getPath() + " due to exception:", 
e.getCause());
+}
+  } else {
+throw e;
+  }
+}
   }
 }
 return snapshotInProgress;

http://git-wip-us.apache.org/repos/asf/hbase/blob/bb7a054e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 5ac9cbc..d590798 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -162,7 +162,7 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
 try {
   // If regions move after this meta scan, the region specific snapshot 
should fail, triggering
   // an external exception that gets captured here.
-
+  SnapshotDescriptionUtils.createInProgressTag(workingDir, fs);
   // write down the snapshot info in the working directory
   SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
   snapshotManifest.addTableDescriptor(this.htd);

http://git-wip-us.apache.org/repos/asf/hbase/blob/bb7a054e/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index 2211aab..28a9d05 100644
--- 
a/hb

hbase git commit: HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot under tmp dir

2016-08-23 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 71e583df3 -> 09ade3208


HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot 
under tmp dir


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/09ade320
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/09ade320
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/09ade320

Branch: refs/heads/branch-1.2
Commit: 09ade32086cdb3ba76587da773de007c17bdfc5c
Parents: 71e583d
Author: chenheng <chenh...@apache.org>
Authored: Mon Aug 22 16:45:20 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Aug 24 13:48:14 2016 +0800

--
 .../master/snapshot/SnapshotFileCache.java  | 17 -
 .../master/snapshot/TakeSnapshotHandler.java|  2 +-
 .../snapshot/SnapshotDescriptionUtils.java  | 14 ++
 .../hbase/snapshot/SnapshotManifestV2.java  | 18 --
 .../snapshot/TestSnapshotHFileCleaner.java  | 20 
 .../hbase/snapshot/SnapshotTestingUtils.java| 12 
 6 files changed, 75 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/09ade320/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index dfd3cb5..5b367c5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 
@@ -300,7 +301,21 @@ public class SnapshotFileCache implements Stoppable {
 FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
 if (running != null) {
   for (FileStatus run : running) {
-
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+try {
+  
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+} catch (CorruptedSnapshotException e) {
+  // See HBASE-16464
+  if (e.getCause() instanceof FileNotFoundException) {
+// If the snapshot is not in progress, we will delete it
+if (!fs.exists(new Path(run.getPath(),
+  SnapshotDescriptionUtils.SNAPSHOT_IN_PROGRESS))) {
+  fs.delete(run.getPath(), true);
+  LOG.warn("delete the " + run.getPath() + " due to exception:", 
e.getCause());
+}
+  } else {
+throw e;
+  }
+}
   }
 }
 return snapshotInProgress;

http://git-wip-us.apache.org/repos/asf/hbase/blob/09ade320/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 7c87ea1..39387cb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -164,7 +164,7 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
 try {
   // If regions move after this meta scan, the region specific snapshot 
should fail, triggering
   // an external exception that gets captured here.
-
+  SnapshotDescriptionUtils.createInProgressTag(workingDir, fs);
   // write down the snapshot info in the working directory
   SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
   snapshotManifest.addTableDescriptor(this.htd);

http://git-wip-us.apache.org/repos/asf/hbase/blob/09ade320/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index b6b608c..aebb647 100644
--- 
a/hb

hbase git commit: HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot under tmp dir

2016-08-23 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 cf3230468 -> 58891babc


HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot 
under tmp dir


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/58891bab
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/58891bab
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/58891bab

Branch: refs/heads/branch-1.3
Commit: 58891babc853c75d067ff7e4d74a6801b2f41102
Parents: cf32304
Author: chenheng <chenh...@apache.org>
Authored: Mon Aug 22 16:45:20 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Aug 24 13:45:52 2016 +0800

--
 .../master/snapshot/SnapshotFileCache.java  | 17 -
 .../master/snapshot/TakeSnapshotHandler.java|  2 +-
 .../snapshot/SnapshotDescriptionUtils.java  | 14 ++
 .../hbase/snapshot/SnapshotManifestV2.java  | 18 --
 .../snapshot/TestSnapshotHFileCleaner.java  | 20 
 .../hbase/snapshot/SnapshotTestingUtils.java| 12 
 6 files changed, 75 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/58891bab/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index dfd3cb5..5b367c5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 
@@ -300,7 +301,21 @@ public class SnapshotFileCache implements Stoppable {
 FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
 if (running != null) {
   for (FileStatus run : running) {
-
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+try {
+  
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+} catch (CorruptedSnapshotException e) {
+  // See HBASE-16464
+  if (e.getCause() instanceof FileNotFoundException) {
+// If the snapshot is not in progress, we will delete it
+if (!fs.exists(new Path(run.getPath(),
+  SnapshotDescriptionUtils.SNAPSHOT_IN_PROGRESS))) {
+  fs.delete(run.getPath(), true);
+  LOG.warn("delete the " + run.getPath() + " due to exception:", 
e.getCause());
+}
+  } else {
+throw e;
+  }
+}
   }
 }
 return snapshotInProgress;

http://git-wip-us.apache.org/repos/asf/hbase/blob/58891bab/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 7c87ea1..39387cb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -164,7 +164,7 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
 try {
   // If regions move after this meta scan, the region specific snapshot 
should fail, triggering
   // an external exception that gets captured here.
-
+  SnapshotDescriptionUtils.createInProgressTag(workingDir, fs);
   // write down the snapshot info in the working directory
   SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
   snapshotManifest.addTableDescriptor(this.htd);

http://git-wip-us.apache.org/repos/asf/hbase/blob/58891bab/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index b6b608c..aebb647 100644
--- 
a/hb

hbase git commit: HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot under tmp dir

2016-08-23 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 73818646b -> de2f653ca


HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot 
under tmp dir


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/de2f653c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/de2f653c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/de2f653c

Branch: refs/heads/branch-1
Commit: de2f653cafde0264ec0b8eeabafada8c5eef34ea
Parents: 7381864
Author: chenheng <chenh...@apache.org>
Authored: Mon Aug 22 16:45:20 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Aug 24 13:43:18 2016 +0800

--
 .../master/snapshot/SnapshotFileCache.java  | 17 -
 .../master/snapshot/TakeSnapshotHandler.java|  2 +-
 .../snapshot/SnapshotDescriptionUtils.java  | 14 ++
 .../hbase/snapshot/SnapshotManifestV2.java  | 18 --
 .../snapshot/TestSnapshotHFileCleaner.java  | 20 
 .../hbase/snapshot/SnapshotTestingUtils.java| 12 
 6 files changed, 75 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/de2f653c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index dfd3cb5..5b367c5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 
@@ -300,7 +301,21 @@ public class SnapshotFileCache implements Stoppable {
 FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
 if (running != null) {
   for (FileStatus run : running) {
-
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+try {
+  
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+} catch (CorruptedSnapshotException e) {
+  // See HBASE-16464
+  if (e.getCause() instanceof FileNotFoundException) {
+// If the snapshot is not in progress, we will delete it
+if (!fs.exists(new Path(run.getPath(),
+  SnapshotDescriptionUtils.SNAPSHOT_IN_PROGRESS))) {
+  fs.delete(run.getPath(), true);
+  LOG.warn("delete the " + run.getPath() + " due to exception:", 
e.getCause());
+}
+  } else {
+throw e;
+  }
+}
   }
 }
 return snapshotInProgress;

http://git-wip-us.apache.org/repos/asf/hbase/blob/de2f653c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 7c87ea1..39387cb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -164,7 +164,7 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
 try {
   // If regions move after this meta scan, the region specific snapshot 
should fail, triggering
   // an external exception that gets captured here.
-
+  SnapshotDescriptionUtils.createInProgressTag(workingDir, fs);
   // write down the snapshot info in the working directory
   SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
   snapshotManifest.addTableDescriptor(this.htd);

http://git-wip-us.apache.org/repos/asf/hbase/blob/de2f653c/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index b6b608c..aebb647 100644
--- 
a/hbase-ser

hbase git commit: HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot under tmp dir

2016-08-23 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 97b164ac3 -> 3909b7c96


HBASE-16464 archive folder grows bigger and bigger due to corrupt snapshot 
under tmp dir


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3909b7c9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3909b7c9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3909b7c9

Branch: refs/heads/master
Commit: 3909b7c96fc36e9fddef90fa7ade9ed61f19826d
Parents: 97b164a
Author: chenheng <chenh...@apache.org>
Authored: Mon Aug 22 16:45:20 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Aug 24 13:38:32 2016 +0800

--
 .../master/snapshot/SnapshotFileCache.java  | 17 -
 .../master/snapshot/TakeSnapshotHandler.java|  2 +-
 .../snapshot/SnapshotDescriptionUtils.java  | 14 ++
 .../hbase/snapshot/SnapshotManifestV2.java  | 18 --
 .../snapshot/TestSnapshotHFileCleaner.java  | 20 
 .../hbase/snapshot/SnapshotTestingUtils.java| 12 
 6 files changed, 75 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3909b7c9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index dfd3cb5..5b367c5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 
@@ -300,7 +301,21 @@ public class SnapshotFileCache implements Stoppable {
 FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
 if (running != null) {
   for (FileStatus run : running) {
-
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+try {
+  
snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
+} catch (CorruptedSnapshotException e) {
+  // See HBASE-16464
+  if (e.getCause() instanceof FileNotFoundException) {
+// If the snapshot is not in progress, we will delete it
+if (!fs.exists(new Path(run.getPath(),
+  SnapshotDescriptionUtils.SNAPSHOT_IN_PROGRESS))) {
+  fs.delete(run.getPath(), true);
+  LOG.warn("delete the " + run.getPath() + " due to exception:", 
e.getCause());
+}
+  } else {
+throw e;
+  }
+}
   }
 }
 return snapshotInProgress;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3909b7c9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 9172e06..8967a70 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -164,7 +164,7 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
 try {
   // If regions move after this meta scan, the region specific snapshot 
should fail, triggering
   // an external exception that gets captured here.
-
+  SnapshotDescriptionUtils.createInProgressTag(workingDir, fs);
   // write down the snapshot info in the working directory
   SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
   snapshotManifest.addTableDescriptor(this.htd);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3909b7c9/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index 560a42b..2fd619d 100644
--- 
a/hbase-ser

hbase git commit: HBASE-15635 Mean age of Blocks in cache (seconds) on webUI should be greater than zero

2016-08-17 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 89b432d45 -> a049e518f


HBASE-15635 Mean age of Blocks in cache (seconds) on webUI should be greater 
than zero

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a049e518
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a049e518
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a049e518

Branch: refs/heads/branch-1.0
Commit: a049e518f3c1967de1668a4c5e618705ab11fb02
Parents: 89b432d
Author: chenheng <chenh...@apache.org>
Authored: Thu Aug 18 10:28:06 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Aug 18 11:00:35 2016 +0800

--
 .../hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon  | 11 ++-
 .../org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java |  4 +++-
 .../org/apache/hadoop/hbase/io/hfile/CacheStats.java |  4 +++-
 3 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a049e518/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index 9883848..f5485c2 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -176,9 +176,6 @@ org.apache.hadoop.util.StringUtils;
 
 <%java>
   AgeSnapshot ageAtEvictionSnapshot = bc.getStats().getAgeAtEvictionSnapshot();
-  // Only show if non-zero mean and stddev as is the case in combinedblockcache
-  double mean = ageAtEvictionSnapshot.getMean();
-  double stddev = ageAtEvictionSnapshot.getStdDev();
 
 
 Evicted
@@ -190,20 +187,16 @@ org.apache.hadoop.util.StringUtils;
 <% String.format("%,d", bc.getStats().getEvictionCount()) %>
 The total number of times an eviction has occurred
 
-<%if mean > 0 %>
 
 Mean
-<% String.format("%,d", 
(long)(ageAtEvictionSnapshot.getMean()/(100 * 1000))) %>
+<% String.format("%,d", (long)(ageAtEvictionSnapshot.getMean())) 
%>
 Mean age of Blocks at eviction time (seconds)
 
-
-<%if stddev > 0 %>
 
 StdDev
-<% String.format("%,d", 
(long)(ageAtEvictionSnapshot.getStdDev()/100)) %>
+<% String.format("%,d", (long)(ageAtEvictionSnapshot.getStdDev())) 
%>
 Standard Deviation for age of Blocks at eviction time
 
-
 
 
 <%def bc_stats>

http://git-wip-us.apache.org/repos/asf/hbase/blob/a049e518/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index 94638da..2d3f524 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -41,6 +41,8 @@ import com.yammer.metrics.stats.Snapshot;
  */
 @InterfaceAudience.Private
 public class BlockCacheUtil {
+
+  public static final long NANOS_PER_SECOND = 1000000000;
   /**
* Needed making histograms.
*/
@@ -225,7 +227,7 @@ public class BlockCacheUtil {
 this.dataBlockCount++;
 this.dataSize += cb.getSize();
   }
-  long age = this.now - cb.getCachedTime();
+  long age = (this.now - cb.getCachedTime())/NANOS_PER_SECOND;
   this.age.update(age);
   return false;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a049e518/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
index 00accfc..ba5fbf4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
@@ -131,7 +131,9 @@ public class CacheStats {
   }
 
   public void evicted(final long t) {
-if (t > this.startTime) this.ageAtEviction.update(t - this.s

hbase git commit: HBASE-15635 Mean age of Blocks in cache (seconds) on webUI should be greater than zero

2016-08-17 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 d82cf96f2 -> 941ad56e7


HBASE-15635 Mean age of Blocks in cache (seconds) on webUI should be greater 
than zero

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/941ad56e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/941ad56e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/941ad56e

Branch: refs/heads/branch-1.1
Commit: 941ad56e7fac6defe4978a35e46f28098f3e1439
Parents: d82cf96
Author: chenheng <chenh...@apache.org>
Authored: Thu Aug 18 10:28:06 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Aug 18 10:57:42 2016 +0800

--
 .../hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon  | 11 ++-
 .../org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java |  4 +++-
 .../org/apache/hadoop/hbase/io/hfile/CacheStats.java |  4 +++-
 3 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/941ad56e/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index 6986f12..a25c224 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -176,9 +176,6 @@ org.apache.hadoop.util.StringUtils;
 
 <%java>
   AgeSnapshot ageAtEvictionSnapshot = bc.getStats().getAgeAtEvictionSnapshot();
-  // Only show if non-zero mean and stddev as is the case in combinedblockcache
-  double mean = ageAtEvictionSnapshot.getMean();
-  double stddev = ageAtEvictionSnapshot.getStdDev();
 
 
 Evicted
@@ -190,20 +187,16 @@ org.apache.hadoop.util.StringUtils;
 <% String.format("%,d", bc.getStats().getEvictionCount()) %>
 The total number of times an eviction has occurred
 
-<%if mean > 0 %>
 
 Mean
-<% String.format("%,d", 
(long)(ageAtEvictionSnapshot.getMean()/(100 * 1000))) %>
+<% String.format("%,d", (long)(ageAtEvictionSnapshot.getMean())) 
%>
 Mean age of Blocks at eviction time (seconds)
 
-
-<%if stddev > 0 %>
 
 StdDev
-<% String.format("%,d", 
(long)(ageAtEvictionSnapshot.getStdDev()/100)) %>
+<% String.format("%,d", (long)(ageAtEvictionSnapshot.getStdDev())) 
%>
 Standard Deviation for age of Blocks at eviction time
 
-
 
 
 <%def hits_tmpl>

http://git-wip-us.apache.org/repos/asf/hbase/blob/941ad56e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index 94638da..2d3f524 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -41,6 +41,8 @@ import com.yammer.metrics.stats.Snapshot;
  */
 @InterfaceAudience.Private
 public class BlockCacheUtil {
+
+  public static final long NANOS_PER_SECOND = 1000000000;
   /**
* Needed making histograms.
*/
@@ -225,7 +227,7 @@ public class BlockCacheUtil {
 this.dataBlockCount++;
 this.dataSize += cb.getSize();
   }
-  long age = this.now - cb.getCachedTime();
+  long age = (this.now - cb.getCachedTime())/NANOS_PER_SECOND;
   this.age.update(age);
   return false;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/941ad56e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
index 00accfc..ba5fbf4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
@@ -131,7 +131,9 @@ public class CacheStats {
   }
 
   public void evicted(final long t) {
-if (t > this.startTime) this.ageAtEviction.update(t - this.s

hbase git commit: HBASE-15635 Mean age of Blocks in cache (seconds) on webUI should be greater than zero

2016-08-17 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 ca1b49dd0 -> a84ff653a


HBASE-15635 Mean age of Blocks in cache (seconds) on webUI should be greater 
than zero


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a84ff653
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a84ff653
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a84ff653

Branch: refs/heads/branch-1.2
Commit: a84ff653ad2af8102d2acbf6102634b477cb7c8b
Parents: ca1b49d
Author: chenheng <chenh...@apache.org>
Authored: Thu Aug 18 10:28:06 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Aug 18 10:36:16 2016 +0800

--
 .../hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon  | 11 ++-
 .../org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java |  4 +++-
 .../org/apache/hadoop/hbase/io/hfile/CacheStats.java |  4 +++-
 3 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a84ff653/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index 00b36c3..2114539 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -176,9 +176,6 @@ org.apache.hadoop.util.StringUtils;
 
 <%java>
   AgeSnapshot ageAtEvictionSnapshot = bc.getStats().getAgeAtEvictionSnapshot();
-  // Only show if non-zero mean and stddev as is the case in combinedblockcache
-  double mean = ageAtEvictionSnapshot.getMean();
-  double stddev = ageAtEvictionSnapshot.getStdDev();
 
 
 Evicted
@@ -190,20 +187,16 @@ org.apache.hadoop.util.StringUtils;
 <% String.format("%,d", bc.getStats().getEvictionCount()) %>
 The total number of times an eviction has occurred
 
-<%if mean > 0 %>
 
 Mean
-<% String.format("%,d", 
(long)(ageAtEvictionSnapshot.getMean()/(100 * 1000))) %>
+<% String.format("%,d", (long)(ageAtEvictionSnapshot.getMean())) 
%>
 Mean age of Blocks at eviction time (seconds)
 
-
-<%if stddev > 0 %>
 
 StdDev
-<% String.format("%,d", 
(long)(ageAtEvictionSnapshot.getStdDev()/100)) %>
+<% String.format("%,d", (long)(ageAtEvictionSnapshot.getStdDev())) 
%>
 Standard Deviation for age of Blocks at eviction time
 
-
 
 
 <%def hits_tmpl>

http://git-wip-us.apache.org/repos/asf/hbase/blob/a84ff653/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index 05eb019..247b6be 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -41,6 +41,8 @@ import com.yammer.metrics.stats.Snapshot;
  */
 @InterfaceAudience.Private
 public class BlockCacheUtil {
+
+  public static final long NANOS_PER_SECOND = 1000000000;
   /**
* Needed making histograms.
*/
@@ -231,7 +233,7 @@ public class BlockCacheUtil {
 this.dataBlockCount++;
 this.dataSize += cb.getSize();
   }
-  long age = this.now - cb.getCachedTime();
+  long age = (this.now - cb.getCachedTime())/NANOS_PER_SECOND;
   this.age.update(age);
   return false;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a84ff653/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
index fff6585..78f022f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
@@ -150,7 +150,9 @@ public class CacheStats {
   }
 
   public void evicted(final long t, boolean primary) {
-if (t > this.startTime) this.ageAtEviction.update(t - this.startTime);
+if (t > this.startTime) {
+  this.ageAtEviction.up

hbase git commit: HBASE-16287 LruBlockCache size should not exceed acceptableSize too many(Yu Sun)

2016-08-04 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 dfc9616ae -> 3bce24dee


HBASE-16287 LruBlockCache size should not exceed acceptableSize too many(Yu Sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3bce24de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3bce24de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3bce24de

Branch: refs/heads/branch-1.2
Commit: 3bce24dee6edc3af9d3190a506e9d6df4c115108
Parents: dfc9616
Author: chenheng <chenh...@apache.org>
Authored: Thu Aug 4 21:13:42 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Aug 4 21:37:48 2016 +0800

--
 .../hadoop/hbase/io/hfile/LruBlockCache.java| 36 ++--
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java |  2 ++
 .../hbase/io/hfile/TestLruBlockCache.java   |  5 +++
 3 files changed, 40 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3bce24de/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 2781833..52ecf00 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -112,6 +112,10 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
*/
   static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = 
"hbase.lru.blockcache.acceptable.factor";
 
+  /**
+   * Hard capacity limit of cache, will reject any put if size > this * 
acceptable
+   */
+  static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME = 
"hbase.lru.blockcache.hard.capacity.limit.factor";
   static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.single.percentage";
   static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.multi.percentage";
   static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.memory.percentage";
@@ -138,6 +142,9 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   static final float DEFAULT_MULTI_FACTOR = 0.50f;
   static final float DEFAULT_MEMORY_FACTOR = 0.25f;
 
+  /** default hard capacity limit */
+  static final float DEFAULT_HARD_CAPACITY_LIMIT_FACTOR = 1.2f;
+
   static final boolean DEFAULT_IN_MEMORY_FORCE_MODE = false;
 
   /** Statistics thread */
@@ -171,6 +178,9 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   /** Cache access count (sequential ID) */
   private final AtomicLong count;
 
+  /** hard capacity limit */
+  private float hardCapacityLimitFactor;
+
   /** Cache statistics */
   private final CacheStats stats;
 
@@ -228,6 +238,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 DEFAULT_SINGLE_FACTOR,
 DEFAULT_MULTI_FACTOR,
 DEFAULT_MEMORY_FACTOR,
+DEFAULT_HARD_CAPACITY_LIMIT_FACTOR,
 false,
 DEFAULT_MAX_BLOCK_SIZE
 );
@@ -243,6 +254,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, 
DEFAULT_SINGLE_FACTOR),
 conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR),
 conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, 
DEFAULT_MEMORY_FACTOR),
+conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, 
DEFAULT_HARD_CAPACITY_LIMIT_FACTOR),
 conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, 
DEFAULT_IN_MEMORY_FORCE_MODE),
 conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)
 );
@@ -269,7 +281,8 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread,
   int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel,
   float minFactor, float acceptableFactor, float singleFactor,
-  float multiFactor, float memoryFactor, boolean forceInMemory, long 
maxBlockSize) {
+  float multiFactor, float memoryFactor, float hardLimitFactor,
+  boolean forceInMemory, long maxBlockSize) {
 this.maxBlockSize = maxBlockSize;
 if(singleFactor + multiFactor + memoryFactor != 1 ||
 singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) {
@@ -297,6 +310,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 this.elements = new AtomicLong(0);
 this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
 this.size = new A

hbase git commit: HBASE-16287 LruBlockCache size should not exceed acceptableSize too many(Yu Sun)

2016-08-04 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 af6ebcd1a -> f49d2fc7e


HBASE-16287 LruBlockCache size should not exceed acceptableSize too many(Yu Sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f49d2fc7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f49d2fc7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f49d2fc7

Branch: refs/heads/branch-1.3
Commit: f49d2fc7e1b3e3cbfee78a1a565ba8a03296d410
Parents: af6ebcd
Author: chenheng <chenh...@apache.org>
Authored: Thu Aug 4 21:13:42 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Aug 4 21:31:55 2016 +0800

--
 .../hadoop/hbase/io/hfile/LruBlockCache.java| 36 ++--
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java |  2 ++
 .../hbase/io/hfile/TestLruBlockCache.java   |  5 +++
 3 files changed, 40 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f49d2fc7/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 268322a..09d82d0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -112,6 +112,10 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
*/
   static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = 
"hbase.lru.blockcache.acceptable.factor";
 
+  /**
+   * Hard capacity limit of cache, will reject any put if size > this * 
acceptable
+   */
+  static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME = 
"hbase.lru.blockcache.hard.capacity.limit.factor";
   static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.single.percentage";
   static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.multi.percentage";
   static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.memory.percentage";
@@ -138,6 +142,9 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   static final float DEFAULT_MULTI_FACTOR = 0.50f;
   static final float DEFAULT_MEMORY_FACTOR = 0.25f;
 
+  /** default hard capacity limit */
+  static final float DEFAULT_HARD_CAPACITY_LIMIT_FACTOR = 1.2f;
+
   static final boolean DEFAULT_IN_MEMORY_FORCE_MODE = false;
 
   /** Statistics thread */
@@ -171,6 +178,9 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   /** Cache access count (sequential ID) */
   private final AtomicLong count;
 
+  /** hard capacity limit */
+  private float hardCapacityLimitFactor;
+
   /** Cache statistics */
   private final CacheStats stats;
 
@@ -228,6 +238,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 DEFAULT_SINGLE_FACTOR,
 DEFAULT_MULTI_FACTOR,
 DEFAULT_MEMORY_FACTOR,
+DEFAULT_HARD_CAPACITY_LIMIT_FACTOR,
 false,
 DEFAULT_MAX_BLOCK_SIZE
 );
@@ -243,6 +254,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, 
DEFAULT_SINGLE_FACTOR),
 conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR),
 conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, 
DEFAULT_MEMORY_FACTOR),
+conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, 
DEFAULT_HARD_CAPACITY_LIMIT_FACTOR),
 conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, 
DEFAULT_IN_MEMORY_FORCE_MODE),
 conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)
 );
@@ -269,7 +281,8 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread,
   int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel,
   float minFactor, float acceptableFactor, float singleFactor,
-  float multiFactor, float memoryFactor, boolean forceInMemory, long 
maxBlockSize) {
+  float multiFactor, float memoryFactor, float hardLimitFactor,
+  boolean forceInMemory, long maxBlockSize) {
 this.maxBlockSize = maxBlockSize;
 if(singleFactor + multiFactor + memoryFactor != 1 ||
 singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) {
@@ -297,6 +310,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 this.elements = new AtomicLong(0);
 this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
 this.size = new A

hbase git commit: HBASE-16287 LruBlockCache size should not exceed acceptableSize too many(Yu Sun)

2016-08-04 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 737e06324 -> 9c8cbd44e


HBASE-16287 LruBlockCache size should not exceed acceptableSize too many(Yu Sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9c8cbd44
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9c8cbd44
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9c8cbd44

Branch: refs/heads/branch-1
Commit: 9c8cbd44edbf304e71ab87cd93ca90f136df5dd3
Parents: 737e063
Author: chenheng <chenh...@apache.org>
Authored: Thu Aug 4 21:13:42 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Aug 4 21:23:27 2016 +0800

--
 .../hadoop/hbase/io/hfile/LruBlockCache.java| 36 ++--
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java |  2 ++
 .../hbase/io/hfile/TestLruBlockCache.java   |  5 +++
 3 files changed, 40 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9c8cbd44/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index cc91cc3..f427e04 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -112,6 +112,10 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
*/
   static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = 
"hbase.lru.blockcache.acceptable.factor";
 
+  /**
+   * Hard capacity limit of cache, will reject any put if size > this * 
acceptable
+   */
+  static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME = 
"hbase.lru.blockcache.hard.capacity.limit.factor";
   static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.single.percentage";
   static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.multi.percentage";
   static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.memory.percentage";
@@ -138,6 +142,9 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   static final float DEFAULT_MULTI_FACTOR = 0.50f;
   static final float DEFAULT_MEMORY_FACTOR = 0.25f;
 
+  /** default hard capacity limit */
+  static final float DEFAULT_HARD_CAPACITY_LIMIT_FACTOR = 1.2f;
+
   static final boolean DEFAULT_IN_MEMORY_FORCE_MODE = false;
 
   /** Statistics thread */
@@ -171,6 +178,9 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   /** Cache access count (sequential ID) */
   private final AtomicLong count;
 
+  /** hard capacity limit */
+  private float hardCapacityLimitFactor;
+
   /** Cache statistics */
   private final CacheStats stats;
 
@@ -228,6 +238,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 DEFAULT_SINGLE_FACTOR,
 DEFAULT_MULTI_FACTOR,
 DEFAULT_MEMORY_FACTOR,
+DEFAULT_HARD_CAPACITY_LIMIT_FACTOR,
 false,
 DEFAULT_MAX_BLOCK_SIZE
 );
@@ -243,6 +254,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, 
DEFAULT_SINGLE_FACTOR),
 conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR),
 conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, 
DEFAULT_MEMORY_FACTOR),
+conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, 
DEFAULT_HARD_CAPACITY_LIMIT_FACTOR),
 conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, 
DEFAULT_IN_MEMORY_FORCE_MODE),
 conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)
 );
@@ -269,7 +281,8 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread,
   int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel,
   float minFactor, float acceptableFactor, float singleFactor,
-  float multiFactor, float memoryFactor, boolean forceInMemory, long 
maxBlockSize) {
+  float multiFactor, float memoryFactor, float hardLimitFactor,
+  boolean forceInMemory, long maxBlockSize) {
 this.maxBlockSize = maxBlockSize;
 if(singleFactor + multiFactor + memoryFactor != 1 ||
 singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) {
@@ -297,6 +310,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 this.elements = new AtomicLong(0);
 this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
 this.size = new A

hbase git commit: HBASE-16287 LruBlockCache size should not exceed acceptableSize too many(Yu Sun)

2016-08-04 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 544dc1e55 -> 550b937bc


HBASE-16287 LruBlockCache size should not exceed acceptableSize too many(Yu Sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/550b937b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/550b937b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/550b937b

Branch: refs/heads/master
Commit: 550b937bcf0b5a7e030194cca7d90524a0fc0f3d
Parents: 544dc1e
Author: chenheng <chenh...@apache.org>
Authored: Thu Aug 4 21:13:42 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Aug 4 21:13:42 2016 +0800

--
 .../hadoop/hbase/io/hfile/LruBlockCache.java| 36 ++--
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java |  2 ++
 .../hbase/io/hfile/TestLruBlockCache.java   |  5 +++
 3 files changed, 40 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/550b937b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 29c5922..2fd9fdf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -110,6 +110,10 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
*/
   static final String LRU_ACCEPTABLE_FACTOR_CONFIG_NAME = 
"hbase.lru.blockcache.acceptable.factor";
 
+  /**
+   * Hard capacity limit of cache, will reject any put if size > this * 
acceptable
+   */
+  static final String LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME = 
"hbase.lru.blockcache.hard.capacity.limit.factor";
   static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.single.percentage";
   static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.multi.percentage";
   static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = 
"hbase.lru.blockcache.memory.percentage";
@@ -136,6 +140,9 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   static final float DEFAULT_MULTI_FACTOR = 0.50f;
   static final float DEFAULT_MEMORY_FACTOR = 0.25f;
 
+  /** default hard capacity limit */
+  static final float DEFAULT_HARD_CAPACITY_LIMIT_FACTOR = 1.2f;
+
   static final boolean DEFAULT_IN_MEMORY_FORCE_MODE = false;
 
   /** Statistics thread */
@@ -169,6 +176,9 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   /** Cache access count (sequential ID) */
   private final AtomicLong count;
 
+  /** hard capacity limit */
+  private float hardCapacityLimitFactor;
+
   /** Cache statistics */
   private final CacheStats stats;
 
@@ -226,6 +236,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 DEFAULT_SINGLE_FACTOR,
 DEFAULT_MULTI_FACTOR,
 DEFAULT_MEMORY_FACTOR,
+DEFAULT_HARD_CAPACITY_LIMIT_FACTOR,
 false,
 DEFAULT_MAX_BLOCK_SIZE
 );
@@ -241,6 +252,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, 
DEFAULT_SINGLE_FACTOR),
 conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR),
 conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, 
DEFAULT_MEMORY_FACTOR),
+conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, 
DEFAULT_HARD_CAPACITY_LIMIT_FACTOR),
 conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, 
DEFAULT_IN_MEMORY_FORCE_MODE),
 conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE)
 );
@@ -267,7 +279,8 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
   public LruBlockCache(long maxSize, long blockSize, boolean evictionThread,
   int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel,
   float minFactor, float acceptableFactor, float singleFactor,
-  float multiFactor, float memoryFactor, boolean forceInMemory, long 
maxBlockSize) {
+  float multiFactor, float memoryFactor, float hardLimitFactor,
+  boolean forceInMemory, long maxBlockSize) {
 this.maxBlockSize = maxBlockSize;
 if(singleFactor + multiFactor + memoryFactor != 1 ||
 singleFactor < 0 || multiFactor < 0 || memoryFactor < 0) {
@@ -295,6 +308,7 @@ public class LruBlockCache implements ResizableBlockCache, 
HeapSize {
 this.elements = new AtomicLong(0);
 this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
 this.size = new A

hbase git commit: HBASE-16234 Expect and handle nulls when assigning replicas(Yi Liang)

2016-07-31 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 69d170063 -> 7e6f56275


HBASE-16234 Expect and handle nulls when assigning replicas(Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7e6f5627
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7e6f5627
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7e6f5627

Branch: refs/heads/master
Commit: 7e6f562754715a8d23755e10a35117633b3a7e9a
Parents: 69d1700
Author: chenheng <chenh...@apache.org>
Authored: Mon Aug 1 11:20:05 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Mon Aug 1 11:21:13 2016 +0800

--
 .../hadoop/hbase/master/AssignmentManager.java  | 48 ++--
 1 file changed, 25 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7e6f5627/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 3cb3a6e..54f8391 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -1668,6 +1668,25 @@ public class AssignmentManager {
   }
 
   /**
+   * Get number of replicas of a table
+   */
+  private static int getNumReplicas(MasterServices master, TableName table) {
+int numReplica = 1;
+try {
+  HTableDescriptor htd = master.getTableDescriptors().get(table);
+  if (htd == null) {
+LOG.warn("master can not get TableDescriptor from table '" + table);
+  } else {
+numReplica = htd.getRegionReplication();
+  }
+} catch (IOException e){
+  LOG.warn("Couldn't get the replication attribute of the table " + table 
+ " due to "
+  + e.getMessage());
+}
+return numReplica;
+  }
+
+  /**
* Get a list of replica regions that are:
* not recorded in meta yet. We might not have recorded the locations
* for the replicas since the replicas may not have been online yet, master 
restarted
@@ -1683,9 +1702,9 @@ public class AssignmentManager {
 List regionsNotRecordedInMeta = new ArrayList();
 for (HRegionInfo hri : regionsRecordedInMeta) {
   TableName table = hri.getTable();
-  HTableDescriptor htd = master.getTableDescriptors().get(table);
-  // look at the HTD for the replica count. That's the source of truth
-  int desiredRegionReplication = htd.getRegionReplication();
+  if(master.getTableDescriptors().get(table) == null)
+continue;
+  int  desiredRegionReplication = getNumReplicas(master, table);
   for (int i = 0; i < desiredRegionReplication; i++) {
 HRegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(hri, 
i);
 if (regionsRecordedInMeta.contains(replica)) continue;
@@ -1728,8 +1747,7 @@ public class AssignmentManager {
   // maybe because it crashed.
   PairOfSameType p = 
MetaTableAccessor.getMergeRegions(result);
   if (p.getFirst() != null && p.getSecond() != null) {
-int numReplicas = 
((MasterServices)server).getTableDescriptors().get(p.getFirst().
-getTable()).getRegionReplication();
+int numReplicas = getNumReplicas(server, p.getFirst().getTable());
 for (HRegionInfo merge : p) {
   for (int i = 1; i < numReplicas; i++) {
 
replicasToClose.add(RegionReplicaUtil.getRegionInfoForReplica(merge, i));
@@ -2679,15 +2697,7 @@ public class AssignmentManager {
 }
   }
 }
-int numReplicas = 1;
-try {
-  numReplicas = 
((MasterServices)server).getTableDescriptors().get(mergedHri.getTable()).
-  getRegionReplication();
-} catch (IOException e) {
-  LOG.warn("Couldn't get the replication attribute of the table " + 
mergedHri.getTable() +
-  " due to " + e.getMessage() + ". The assignment of replicas for the 
merged region " +
-  "will not be done");
-}
+int numReplicas = getNumReplicas(server, mergedHri.getTable());
 List regions = new ArrayList();
 for (int i = 1; i < numReplicas; i++) {
   regions.add(RegionReplicaUtil.getRegionInfoForReplica(mergedHri, i));
@@ -2708,15 +2718,7 @@ public class AssignmentManager {
 // create new regions for the replica, and assign them to match with the
 // current replica assignments. If replica1 of parent is assigned to RS1,
 // the replica1s of daughters will be on the same machine
-int numReplicas = 1;
-try {
-  numReplicas = 
((M

hbase git commit: HBASE-16076 Cannot configure split policy in HBase shell

2016-07-17 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master a8dfd8c43 -> cf0f4f72d


HBASE-16076 Cannot configure split policy in HBase shell


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cf0f4f72
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cf0f4f72
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cf0f4f72

Branch: refs/heads/master
Commit: cf0f4f72d989a86d062010773874d98870ad2e59
Parents: a8dfd8c
Author: chenheng <chenh...@apache.org>
Authored: Mon Jul 18 10:28:05 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Mon Jul 18 10:28:05 2016 +0800

--
 .../java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java| 1 +
 src/main/asciidoc/_chapters/architecture.adoc | 3 +--
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cf0f4f72/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 66968e0..222e146 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -270,6 +270,7 @@ public class HFileBlockIndex {
 public Cell getRootBlockKey(int i) {
   return blockKeys[i];
 }
+
 @Override
 public BlockWithScanInfo loadDataBlockWithScanInfo(Cell key, HFileBlock 
currentBlock,
 boolean cacheBlocks, boolean pread, boolean isCompaction,

http://git-wip-us.apache.org/repos/asf/hbase/blob/cf0f4f72/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc 
b/src/main/asciidoc/_chapters/architecture.adoc
index 4b88665..cfdd638 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -1412,8 +1412,7 @@ admin.createTable(tableDesc);
 [source]
 .Configuring the Split Policy On a Table Using HBase Shell
 
-hbase> create 'test', {METHOD => 'table_att', CONFIG => {'SPLIT_POLICY' => 
'org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy'}},
-{NAME => 'cf1'}
+hbase> create 'test', {METADATA => {'SPLIT_POLICY' => 
'org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy'}},{NAME => 
'cf1'}
 
 
 The default split policy can be overwritten using a custom



hbase git commit: HBASE-15844 We should respect hfile.block.index.cacheonwrite when write intermediate index Block

2016-07-01 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 7121bc41e -> f18ac8157


HBASE-15844 We should respect hfile.block.index.cacheonwrite when write 
intermediate index Block


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f18ac815
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f18ac815
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f18ac815

Branch: refs/heads/branch-1
Commit: f18ac8157b254324f442252349e84548cbe328de
Parents: 7121bc4
Author: chenheng <chenh...@apache.org>
Authored: Sat Jul 2 09:04:15 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sat Jul 2 09:09:51 2016 +0800

--
 .../org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f18ac815/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 4f97c0a..e44b4c9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -883,6 +883,11 @@ public class HFileBlockIndex {
 if (midKeyMetadata != null)
   blockStream.write(midKeyMetadata);
 blockWriter.writeHeaderAndData(out);
+if (cacheConf != null) {
+  HFileBlock blockForCaching = 
blockWriter.getBlockForCaching(cacheConf);
+  cacheConf.getBlockCache().cacheBlock(new 
BlockCacheKey(nameForCaching,
+rootLevelIndexPos, true, blockForCaching.getBlockType()), 
blockForCaching);
+}
   }
 
   // Add root index block size
@@ -978,7 +983,7 @@ public class HFileBlockIndex {
   byte[] curFirstKey = curChunk.getBlockKey(0);
   blockWriter.writeHeaderAndData(out);
 
-  if (cacheConf != null) {
+  if (getCacheOnWrite()) {
 HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
 cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching,
   beginOffset, true, blockForCaching.getBlockType()), blockForCaching);



hbase git commit: HBASE-15844 We should respect hfile.block.index.cacheonwrite when write intermediate index Block

2016-07-01 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master af9422c04 -> 561eb8296


HBASE-15844 We should respect hfile.block.index.cacheonwrite when write 
intermediate index Block


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/561eb829
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/561eb829
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/561eb829

Branch: refs/heads/master
Commit: 561eb82968360cd571f897a0f28f8f6e853e85ab
Parents: af9422c
Author: chenheng <chenh...@apache.org>
Authored: Sat Jul 2 09:04:15 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sat Jul 2 09:04:15 2016 +0800

--
 .../org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/561eb829/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 76fec06..66968e0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -1072,6 +1072,11 @@ public class HFileBlockIndex {
 if (midKeyMetadata != null)
   blockStream.write(midKeyMetadata);
 blockWriter.writeHeaderAndData(out);
+if (cacheConf != null) {
+  HFileBlock blockForCaching = 
blockWriter.getBlockForCaching(cacheConf);
+  cacheConf.getBlockCache().cacheBlock(new 
BlockCacheKey(nameForCaching,
+rootLevelIndexPos, true, blockForCaching.getBlockType()), 
blockForCaching);
+}
   }
 
   // Add root index block size
@@ -1167,7 +1172,7 @@ public class HFileBlockIndex {
   byte[] curFirstKey = curChunk.getBlockKey(0);
   blockWriter.writeHeaderAndData(out);
 
-  if (cacheConf != null) {
+  if (getCacheOnWrite()) {
 HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
 cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching,
   beginOffset, true, blockForCaching.getBlockType()), blockForCaching);



hbase git commit: HBASE-16111 Truncate preserve shell command is broken

2016-06-27 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master fc4b8aa89 -> e05f52799


HBASE-16111 Truncate preserve shell command is broken


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e05f5279
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e05f5279
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e05f5279

Branch: refs/heads/master
Commit: e05f527991356405ad7186dd48d9d06201695388
Parents: fc4b8aa
Author: chenheng <chenh...@apache.org>
Authored: Mon Jun 27 15:06:51 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Mon Jun 27 15:06:51 2016 +0800

--
 hbase-shell/src/main/ruby/hbase/admin.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e05f5279/hbase-shell/src/main/ruby/hbase/admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb 
b/hbase-shell/src/main/ruby/hbase/admin.rb
index 73a5863..73b78d8 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -490,7 +490,7 @@ module Hbase
 
#--
 # Truncates table while maintaing region boundaries (deletes all records 
by recreating the table)
 def truncate_preserve(table_name_str)
-  puts "Truncating '#{table}' table (it may take a while):"
+  puts "Truncating '#{table_name_str}' table (it may take a while):"
   table_name = TableName.valueOf(table_name_str)
   locator = @connection.getRegionLocator(table_name)
   begin



hbase git commit: HBASE-16040 Remove configuration "hbase.replication"

2016-06-23 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master f4cec2e20 -> 68c1b34db


HBASE-16040 Remove configuration "hbase.replication"


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/68c1b34d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/68c1b34d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/68c1b34d

Branch: refs/heads/master
Commit: 68c1b34dbc15afd56246294a4ebac7d7b72a54e0
Parents: f4cec2e
Author: chenheng <chenh...@apache.org>
Authored: Wed Jun 22 15:07:05 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Fri Jun 24 10:41:35 2016 +0800

--
 .../client/replication/ReplicationAdmin.java|   5 -
 .../org/apache/hadoop/hbase/HConstants.java |   4 -
 .../src/main/resources/hbase-default.xml|   2 -
 ...IntegrationTestRegionReplicaReplication.java |   1 -
 .../replication/VerifyReplication.java  |   4 -
 .../hbase/regionserver/HRegionServer.java   |   6 --
 .../master/ReplicationHFileCleaner.java |   7 +-
 .../master/ReplicationLogCleaner.java   |   6 --
 .../replication/regionserver/Replication.java   | 107 +++
 .../hbase/util/ServerRegionReplicaUtil.java |   2 -
 .../replication/TestReplicationAdmin.java   |   1 -
 ...licationAdminWithTwoDifferentZKClusters.java |   1 -
 .../hbase/master/cleaner/TestLogsCleaner.java   |   1 -
 .../hbase/regionserver/TestClusterId.java   |   1 -
 .../regionserver/TestRegionReplicaFailover.java |   1 -
 .../replication/TestMasterReplication.java  |  25 -
 .../replication/TestMultiSlaveReplication.java  |   1 -
 .../replication/TestPerTableCFReplication.java  |   1 -
 .../hbase/replication/TestReplicationBase.java  |   2 -
 .../TestRegionReplicaReplicationEndpoint.java   |   1 -
 ...egionReplicaReplicationEndpointNoMaster.java |   1 -
 .../regionserver/TestReplicationSink.java   |   2 -
 .../TestReplicationSourceManagerZkImpl.java |   2 -
 ...tTableBasedReplicationSourceManagerImpl.java |   2 -
 ...sibilityLabelReplicationWithExpAsString.java |   2 -
 .../TestVisibilityLabelsReplication.java|   2 -
 hbase-shell/src/main/ruby/hbase/admin.rb|  68 ++--
 src/main/asciidoc/_chapters/architecture.adoc   |   2 +-
 src/main/asciidoc/_chapters/hbase-default.adoc  |   3 +-
 src/main/asciidoc/_chapters/ops_mgt.adoc|   5 -
 30 files changed, 76 insertions(+), 192 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/68c1b34d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index b04d317..dca1821 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -113,11 +113,6 @@ public class ReplicationAdmin implements Closeable {
* @throws RuntimeException if replication isn't enabled.
*/
   public ReplicationAdmin(Configuration conf) throws IOException {
-if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY,
-HConstants.REPLICATION_ENABLE_DEFAULT)) {
-  throw new RuntimeException("hbase.replication isn't true, please " +
-  "enable it in order to use replication");
-}
 this.connection = ConnectionFactory.createConnection(conf);
 try {
   zkw = createZooKeeperWatcher();

http://git-wip-us.apache.org/repos/asf/hbase/blob/68c1b34d/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index b05a520..fa4ce64 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -839,10 +839,6 @@ public final class HConstants {
* cluster replication constants.
*/
   public static final String
-  REPLICATION_ENABLE_KEY = "hbase.replication";
-  public static final boolean
-  REPLICATION_ENABLE_DEFAULT = true;
-  public static final String
   REPLICATION_SOURCE_SERVICE_CLASSNAME = 
"hbase.replication.source.service";
   public static final String
   REPLICATION_SINK_SERVICE_CLASSNAME = "hbase.replication.sink.service";

http://git-wip-us.apache.org/repo

hbase git commit: HBASE-16031 Documents about "hbase.replication" default value seems wrong

2016-06-16 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master f19f1d9e9 -> d1de9337e


HBASE-16031 Documents about "hbase.replication" default value seems wrong


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d1de9337
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d1de9337
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d1de9337

Branch: refs/heads/master
Commit: d1de9337ef7084c6d23d0bc3116d29049e8af255
Parents: f19f1d9
Author: chenheng <chenh...@apache.org>
Authored: Thu Jun 16 13:52:16 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Jun 16 14:12:37 2016 +0800

--
 src/main/asciidoc/_chapters/ops_mgt.adoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d1de9337/src/main/asciidoc/_chapters/ops_mgt.adoc
--
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc 
b/src/main/asciidoc/_chapters/ops_mgt.adoc
index bc75951..e7ca940 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -1651,9 +1651,9 @@ The following metrics are exposed at the global region 
server level and (since H
 | hbase.replication
 | Whether replication is enabled or disabled on a given
 cluster
-| false
+| true
 
-| eplication.sleep.before.failover
+| replication.sleep.before.failover
 | How many milliseconds a worker should sleep before attempting to replicate
 a dead region server's WAL queues.
 |



hbase git commit: HBASE-16023 Fastpath for the FIFO rpcscheduler. addendum

2016-06-15 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master ee86e91e7 -> db234bf15


HBASE-16023 Fastpath for the FIFO rpcscheduler. addendum


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/db234bf1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/db234bf1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/db234bf1

Branch: refs/heads/master
Commit: db234bf15dac9c7904a2ad50a86ac4f1a7da3ca1
Parents: ee86e91
Author: chenheng <chenh...@apache.org>
Authored: Wed Jun 15 16:15:41 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Wed Jun 15 16:20:55 2016 +0800

--
 .../java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java| 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/db234bf1/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
index 53f9175..0e50761 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
@@ -55,6 +55,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.testclassification.RPCTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;



hbase git commit: HBASE-15596 hbAdmin is not closed after LoadIncrementalHFiles completes

2016-05-08 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 23d0846a7 -> 8a20ba049


HBASE-15596 hbAdmin is not closed after LoadIncrementalHFiles completes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8a20ba04
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8a20ba04
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8a20ba04

Branch: refs/heads/branch-1.2
Commit: 8a20ba049143395bffd304fee860217c4ca9
Parents: 23d0846
Author: chenheng <chenh...@apache.org>
Authored: Mon May 9 11:42:14 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Mon May 9 11:42:14 2016 +0800

--
 .../hadoop/hbase/mapreduce/LoadIncrementalHFiles.java   | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8a20ba04/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 752a639..9630a35 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -373,10 +373,10 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
*/
   boolean validateHFile = 
getConf().getBoolean("hbase.loadincremental.validate.hfile", true);
   if(!validateHFile) {
-   LOG.warn("You are skipping HFiles validation, it might cause some data 
loss if files " +
-   "are not correct. If you fail to read data from your table after 
using this " +
-   "option, consider removing the files and bulkload again without 
this option. " +
-   "See HBASE-13985");
+LOG.warn("You are skipping HFiles validation, it might cause some data 
loss if files " +
+  "are not correct. If you fail to read data from your table after 
using this " +
+  "option, consider removing the files and bulkload again without this 
option. " +
+  "See HBASE-13985");
   }
   discoverLoadQueue(queue, hfofDir, validateHFile);
   // check whether there is invalid family name in HFiles to be bulkloaded
@@ -1035,7 +1035,9 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
 throw new TableNotFoundException(errorMsg);
   }
 }
-
+if (hbAdmin != null) {
+  hbAdmin.close();
+}
 Path hfofDir = new Path(dirPath);
 
 try (Connection connection = ConnectionFactory.createConnection(getConf());



hbase git commit: HBASE-15714 We are calling checkRow() twice in doMiniBatchMutation()

2016-05-03 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 330b3b281 -> 0ee3ca2a7


HBASE-15714 We are calling checkRow() twice in doMiniBatchMutation()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0ee3ca2a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0ee3ca2a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0ee3ca2a

Branch: refs/heads/branch-1
Commit: 0ee3ca2a781f60cdaa9433a0a4f20bf3a146e4e6
Parents: 330b3b2
Author: chenheng <chenh...@apache.org>
Authored: Tue May 3 12:45:18 2016 +1000
Committer: chenheng <chenh...@apache.org>
Committed: Tue May 3 22:34:09 2016 +1000

--
 .../hadoop/hbase/regionserver/HRegion.java  | 22 
 .../hbase/regionserver/TestAtomicOperation.java |  4 ++--
 2 files changed, 15 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0ee3ca2a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index a9ca483..e578c8c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3104,7 +3104,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 // get the next one.
 RowLock rowLock = null;
 try {
-  rowLock = getRowLock(mutation.getRow(), true);
+  rowLock = getRowLockInternal(mutation.getRow(), true);
 } catch (IOException ioe) {
   LOG.warn("Failed getting lock in batch put, row="
 + Bytes.toStringBinary(mutation.getRow()), ioe);
@@ -3455,9 +3455,9 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   Get get = new Get(row);
   checkFamily(family);
   get.addColumn(family, qualifier);
-
+  checkRow(row, "checkAndMutate");
   // Lock row - note that doBatchMutate will relock this row if called
-  RowLock rowLock = getRowLock(get.getRow());
+  RowLock rowLock = getRowLockInternal(get.getRow(), false);
   // wait for all previous transactions to complete (with lock held)
   mvcc.await();
   try {
@@ -3565,9 +3565,9 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   Get get = new Get(row);
   checkFamily(family);
   get.addColumn(family, qualifier);
-
+  checkRow(row, "checkAndRowMutate");
   // Lock row - note that doBatchMutate will relock this row if called
-  RowLock rowLock = getRowLock(get.getRow());
+  RowLock rowLock = getRowLockInternal(get.getRow(), false);
   // wait for all previous transactions to complete (with lock held)
   mvcc.await();
   try {
@@ -5202,6 +5202,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   public RowLock getRowLock(byte[] row, boolean readLock) throws IOException {
 // Make sure the row is inside of this region before getting the lock for 
it.
 checkRow(row, "row lock");
+return getRowLockInternal(row, readLock);
+  }
+
+  protected RowLock getRowLockInternal(byte[] row, boolean readLock) throws 
IOException {
 // create an object to use as a key in the row lock map
 HashedBytes rowKey = new HashedBytes(row);
 
@@ -7022,7 +7026,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   for (byte[] row : rowsToLock) {
 // Attempt to lock all involved rows, throw if any lock times out
 // use a writer lock for mixed reads and writes
-acquiredRowLocks.add(getRowLock(row));
+acquiredRowLocks.add(getRowLockInternal(row, false));
   }
   // 3. Region lock
   lock(this.updatesLock.readLock(), acquiredRowLocks.size() == 0 ? 1 : 
acquiredRowLocks.size());
@@ -7259,7 +7263,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 WALKey walKey = null;
 boolean doRollBackMemstore = false;
 try {
-  rowLock = getRowLock(row);
+  rowLock = getRowLockInternal(row, false);
   assert rowLock != null;
   try {
 lock(this.updatesLock.readLock());
@@ -7549,7 +7553,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 // changing it. These latter increments by zero are NOT added to the WAL.
 List allKVs = new ArrayList(increment.size());
 Durability effectiveDurability = 
getEffectiveDurability(increment.getDurability());
-RowLock rowLock = getRowLock(increme

hbase git commit: HBASE-15714 We are calling checkRow() twice in doMiniBatchMutation()

2016-05-02 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 58c4c3d17 -> d77972ff1


HBASE-15714 We are calling checkRow() twice in doMiniBatchMutation()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d77972ff
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d77972ff
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d77972ff

Branch: refs/heads/master
Commit: d77972ff16ef884b6cc5651a789597ae68bb
Parents: 58c4c3d
Author: chenheng <chenh...@apache.org>
Authored: Tue May 3 09:42:41 2016 +1000
Committer: chenheng <chenh...@apache.org>
Committed: Tue May 3 09:42:41 2016 +1000

--
 .../org/apache/hadoop/hbase/regionserver/HRegion.java | 14 +-
 .../hbase/regionserver/TestAtomicOperation.java   |  4 ++--
 2 files changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d77972ff/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 428541f..5676e15 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3019,7 +3019,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 // If we haven't got any rows in our batch, we should block to get the 
next one.
 RowLock rowLock = null;
 try {
-  rowLock = getRowLock(mutation.getRow(), true);
+  rowLock = getRowLockInternal(mutation.getRow(), true);
 } catch (IOException ioe) {
   LOG.warn("Failed getting lock, row=" + 
Bytes.toStringBinary(mutation.getRow()), ioe);
 }
@@ -3423,7 +3423,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   checkFamily(family);
   get.addColumn(family, qualifier);
   // Lock row - note that doBatchMutate will relock this row if called
-  RowLock rowLock = getRowLock(get.getRow());
+  checkRow(row, "doCheckAndRowMutate");
+  RowLock rowLock = getRowLockInternal(get.getRow(), false);
   try {
 if (mutation != null && this.getCoprocessorHost() != null) {
   // Call coprocessor.
@@ -5107,8 +5108,11 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
*/
   @Override
   public RowLock getRowLock(byte[] row, boolean readLock) throws IOException {
-// Make sure the row is inside of this region before getting the lock for 
it.
 checkRow(row, "row lock");
+return getRowLockInternal(row, readLock);
+  }
+
+  protected RowLock getRowLockInternal(byte[] row, boolean readLock) throws 
IOException {
 // create an object to use as a key in the row lock map
 HashedBytes rowKey = new HashedBytes(row);
 
@@ -6827,7 +6831,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   for (byte[] row : rowsToLock) {
 // Attempt to lock all involved rows, throw if any lock times out
 // use a writer lock for mixed reads and writes
-acquiredRowLocks.add(getRowLock(row));
+acquiredRowLocks.add(getRowLockInternal(row, false));
   }
   // STEP 3. Region lock
   lock(this.updatesLock.readLock(), acquiredRowLocks.size() == 0 ? 1 : 
acquiredRowLocks.size());
@@ -7007,7 +7011,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 startRegionOperation(op);
 long accumulatedResultSize = 0;
 List results = returnResults? new ArrayList(mutation.size()): 
null;
-RowLock rowLock = getRowLock(mutation.getRow());
+RowLock rowLock = getRowLockInternal(mutation.getRow(), false);
 try {
   lock(this.updatesLock.readLock());
   try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d77972ff/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index 0626dce..11bd280 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -666,11 +666,11 @@ public class TestAtomicOperation {
 }
 
 @Override
-public RowLock getRowLock(final byte[] row, boolea

hbase git commit: HBASE-15720 Print row locks at the debug dump page, addendum

2016-05-02 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 891119501 -> a3e77bf36


HBASE-15720 Print row locks at the debug dump page, addendum


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a3e77bf3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a3e77bf3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a3e77bf3

Branch: refs/heads/branch-1.0
Commit: a3e77bf3664d361a5337ba6b584848eb65ceccbb
Parents: 8911195
Author: chenheng <chenh...@apache.org>
Authored: Tue May 3 09:31:45 2016 +1000
Committer: chenheng <chenh...@apache.org>
Committed: Tue May 3 09:31:45 2016 +1000

--
 .../java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e77bf3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 56d0417..8fe6126 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -105,8 +105,7 @@ public class RSDumpServlet extends StateDumpServlet {
 
   public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
 StringBuilder sb = new StringBuilder();
-for (Region region : hrs.getOnlineRegionsLocalContext()) {
-  HRegion hRegion = (HRegion)region;
+for (HRegion hRegion : hrs.getOnlineRegionsLocalContext()) {
   if (hRegion.getLockedRows().size() > 0) {
 for (HRegion.RowLockContext rowLockContext : 
hRegion.getLockedRows().values()) {
   sb.setLength(0);



hbase git commit: HBASE-15720 Print row locks at the debug dump page; addendum

2016-05-02 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 23bd2275a -> bf1394159


HBASE-15720 Print row locks at the debug dump page; addendum


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bf139415
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bf139415
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bf139415

Branch: refs/heads/branch-1.2
Commit: bf13941592ab0c947ce76cf4c353696414fc1067
Parents: 23bd227
Author: chenheng <chenh...@apache.org>
Authored: Tue May 3 09:18:28 2016 +1000
Committer: chenheng <chenh...@apache.org>
Committed: Tue May 3 09:18:28 2016 +1000

--
 .../java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bf139415/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 0c69bd9..56d0417 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -105,7 +105,7 @@ public class RSDumpServlet extends StateDumpServlet {
 
   public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
 StringBuilder sb = new StringBuilder();
-for (Region region : hrs.getOnlineRegions()) {
+for (Region region : hrs.getOnlineRegionsLocalContext()) {
   HRegion hRegion = (HRegion)region;
   if (hRegion.getLockedRows().size() > 0) {
 for (HRegion.RowLockContext rowLockContext : 
hRegion.getLockedRows().values()) {



hbase git commit: Revert "HBASE-15278 AsyncRPCClient hangs if Connection closes before RPC call response"

2016-04-29 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 01c0448cc -> ccdb4


Revert "HBASE-15278 AsyncRPCClient hangs if Connection closes before RPC call 
response"

This reverts commit 01c0448ccd943186ba8045074a59e53f8f08c364.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ccdb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ccdb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ccdb

Branch: refs/heads/master
Commit: ccdb4b5cbb5580633bc204c82edb25707287
Parents: 01c0448
Author: chenheng <chenh...@apache.org>
Authored: Sat Apr 30 11:37:05 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sat Apr 30 11:37:05 2016 +0800

--
 .../hadoop/hbase/ipc/AsyncRpcChannel.java   |  6 --
 .../hbase/ipc/AsyncServerResponseHandler.java   |  8 ++-
 .../hadoop/hbase/ipc/AbstractTestIPC.java   | 69 +---
 3 files changed, 9 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ccdb/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
index ef3240c..53eb824 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
@@ -210,12 +210,6 @@ public class AsyncRpcChannel {
 ch.pipeline().addLast("frameDecoder",
   new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4));
 ch.pipeline().addLast(new AsyncServerResponseHandler(this));
-ch.closeFuture().addListener(new GenericFutureListener() {
-  @Override
-  public void operationComplete(ChannelFuture future) throws Exception {
-close(null);
-  }
-});
 try {
   writeChannelHeader(ch).addListener(new 
GenericFutureListener() {
 @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/ccdb/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
index 5c604a4..e0c7586 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
+import java.io.IOException;
+
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -103,6 +105,11 @@ public class AsyncServerResponseHandler extends 
SimpleChannelInboundHandlere
@@ -116,5 +123,4 @@ public class AsyncServerResponseHandler extends 
SimpleChannelInboundHandlerhttp://git-wip-us.apache.org/repos/asf/hbase/blob/ccdb/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
index bfbfa8c..69c8fe2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
@@ -30,8 +30,6 @@ import java.net.ConnectException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
-import java.nio.ByteBuffer;
-import java.nio.channels.SocketChannel;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -41,7 +39,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -161,39 +158,6 @@ public abstract class AbstractTestIPC {
 }
   }
 
-  static class TestFailingRpcServer extends TestRpcServer {
-
-TestFailingRpcServer() throws IOException {
-  this(new FifoRpcScheduler(CONF, 1), CONF);
-}
-
-TestFailingRpcServer(Configuration conf) throws IOException {
-  this(new FifoRpcScheduler(conf, 1), conf);
-}
-
-TestFailingRpcServer(Rp

hbase git commit: HBASE-15278 AsyncRPCClient hangs if Connection closes before RPC call response

2016-04-29 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master c236409c3 -> 01c0448cc


HBASE-15278 AsyncRPCClient hangs if Connection closes before RPC call response


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/01c0448c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/01c0448c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/01c0448c

Branch: refs/heads/master
Commit: 01c0448ccd943186ba8045074a59e53f8f08c364
Parents: c236409
Author: chenheng <chenh...@apache.org>
Authored: Sat Apr 30 09:27:32 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Sat Apr 30 09:27:32 2016 +0800

--
 .../hadoop/hbase/ipc/AsyncRpcChannel.java   |  6 ++
 .../hbase/ipc/AsyncServerResponseHandler.java   |  8 +--
 .../hadoop/hbase/ipc/AbstractTestIPC.java   | 69 +++-
 3 files changed, 74 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/01c0448c/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
index 53eb824..ef3240c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
@@ -210,6 +210,12 @@ public class AsyncRpcChannel {
 ch.pipeline().addLast("frameDecoder",
   new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4));
 ch.pipeline().addLast(new AsyncServerResponseHandler(this));
+ch.closeFuture().addListener(new GenericFutureListener() {
+  @Override
+  public void operationComplete(ChannelFuture future) throws Exception {
+close(null);
+  }
+});
 try {
   writeChannelHeader(ch).addListener(new 
GenericFutureListener() {
 @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/01c0448c/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
index e0c7586..5c604a4 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import java.io.IOException;
-
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -105,11 +103,6 @@ public class AsyncServerResponseHandler extends 
SimpleChannelInboundHandlere
@@ -123,4 +116,5 @@ public class AsyncServerResponseHandler extends 
SimpleChannelInboundHandlerhttp://git-wip-us.apache.org/repos/asf/hbase/blob/01c0448c/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
index 69c8fe2..bfbfa8c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
@@ -30,6 +30,8 @@ import java.net.ConnectException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
+import java.nio.ByteBuffer;
+import java.nio.channels.SocketChannel;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -39,6 +41,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -158,6 +161,39 @@ public abstract class AbstractTestIPC {
 }
   }
 
+  static class TestFailingRpcServer extends TestRpcServer {
+
+TestFailingRpcServer() throws IOException {
+  this(new FifoRpcScheduler(CONF, 1), CONF);
+}
+
+TestFailingRpcServer(Configuration conf) throws IOException {
+  this(new FifoRpcScheduler(conf, 1), conf);
+}
+
+TestFailingRpcServer(RpcScheduler scheduler, Configuration conf) throws 
IOException {
+  

hbase git commit: HBASE-15720 Print row locks at the debug dump page

2016-04-27 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/0.98 1c70befd7 -> 716ca48a7


HBASE-15720 Print row locks at the debug dump page

Conflicts:
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/716ca48a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/716ca48a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/716ca48a

Branch: refs/heads/0.98
Commit: 716ca48a7491ecf561efba532fb7a5ff3f7a5e57
Parents: 1c70bef
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 28 12:18:32 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 28 12:37:55 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 15 +-
 .../hbase/regionserver/RSDumpServlet.java   | 21 +++-
 2 files changed, 34 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/716ca48a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index aa88257..f426cdd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -250,7 +250,6 @@ public class HRegion implements HeapSize { // , Writable{
   
//
   // Members
   
//
-
   // map from a locked row to the context for that lock including:
   // - CountDownLatch for threads waiting on that row
   // - the thread that owns the lock (allow reentrancy)
@@ -6616,6 +6615,15 @@ public class HRegion implements HeapSize { // , Writable{
 latch.countDown();
   }
 }
+
+@Override
+public String toString() {
+ return "RowLockContext{" +
+ "row=" + row +
+ ", count=" + lockCount +
+ ", threadName=" + thread.getName() +
+ '}';
+}
   }
 
   /**
@@ -6660,4 +6668,9 @@ public class HRegion implements HeapSize { // , Writable{
   public void updatesUnlock() throws InterruptedIOException {
 updatesLock.readLock().unlock();
   }
+
+  public ConcurrentHashMap<HashedBytes, RowLockContext> getLockedRows() {
+return lockedRows;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/716ca48a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index f2aab40..fd72245 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -72,7 +72,11 @@ public class RSDumpServlet extends StateDumpServlet {
 out.println("\n\nTasks:");
 out.println(LINE);
 TaskMonitor.get().dumpAsText(out);
-
+
+out.println("\n\nRowLocks:");
+out.println(LINE);
+dumpRowLock(hrs, out);
+
 out.println("\n\nExecutors:");
 out.println(LINE);
 dumpExecutors(hrs.getExecutorService(), out);
@@ -120,5 +124,20 @@ public class RSDumpServlet extends StateDumpServlet {
 + hrs.cacheFlusher.toString());
 out.println(hrs.cacheFlusher.dumpQueue());
   }
+
+  public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
+StringBuilder sb = new StringBuilder();
+for (HRegion hRegion : hrs.getOnlineRegionsLocalContext()) {
+  if (hRegion.getLockedRows().size() > 0) {
+for (HRegion.RowLockContext rowLockContext : 
hRegion.getLockedRows().values()) {
+  sb.setLength(0);
+  sb.append(hRegion.getTableDesc().getTableName()).append(",")
+.append(hRegion.getRegionInfo().getEncodedName()).append(",");
+  sb.append(rowLockContext.toString());
+  out.println(sb.toString());
+}
+  }
+}
+  }
   
 }



hbase git commit: HBASE-15720 Print row locks at the debug dump page

2016-04-27 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 719993e0f -> a837182b7


HBASE-15720 Print row locks at the debug dump page

Conflicts:
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a837182b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a837182b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a837182b

Branch: refs/heads/branch-1.0
Commit: a837182b79df1eb1bf7c50418d54d9881ea26509
Parents: 719993e
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 28 12:18:32 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 28 12:25:06 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 17 +++-
 .../hbase/regionserver/RSDumpServlet.java   | 21 +++-
 2 files changed, 36 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a837182b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 8aed3a6..18a6684 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -268,7 +268,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver { //
   
//
   // Members
   
//
-
   // map from a locked row to the context for that lock including:
   // - CountDownLatch for threads waiting on that row
   // - the thread that owns the lock (allow reentrancy)
@@ -3862,6 +3861,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver { //
 return this.stores.get(column);
   }
 
+  public ConcurrentHashMap<HashedBytes, RowLockContext> getLockedRows() {
+return lockedRows;
+  }
+
   /**
* Return HStore instance. Does not do any copy: as the number of store is 
limited, we
*  iterate on the list.
@@ -6774,6 +6777,16 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver { //
 latch.countDown();
   }
 }
+
+@Override
+public String toString() {
+  return "RowLockContext{" +
+"row=" + row +
+", count=" + lockCount +
+", threadName=" + thread.getName() +
+'}';
+}
+
   }
 
   /**
@@ -6867,4 +6880,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver { //
   public RegionSplitPolicy getSplitPolicy() {
 return this.splitPolicy;
   }
+
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a837182b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 77b68ec..56d0417 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -23,7 +23,6 @@ import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.util.Date;
-
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -69,6 +68,10 @@ public class RSDumpServlet extends StateDumpServlet {
 out.println(LINE);
 TaskMonitor.get().dumpAsText(out);
 
+out.println("\n\nRowLocks:");
+out.println(LINE);
+dumpRowLock(hrs, out);
+
 out.println("\n\nExecutors:");
 out.println(LINE);
 dumpExecutors(hrs.getExecutorService(), out);
@@ -100,6 +103,22 @@ public class RSDumpServlet extends StateDumpServlet {
 out.flush();
   }
 
+  public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
+StringBuilder sb = new StringBuilder();
+for (Region region : hrs.getOnlineRegionsLocalContext()) {
+  HRegion hRegion = (HRegion)region;
+  if (hRegion.getLockedRows().size() > 0) {
+for (HRegion.RowLockContext rowLockContext : 
hRegion.getLockedRows().values()) {
+  sb.setLength(0);
+  sb.append(hRegion.getTableDesc().getTableName()).append(",")
+ 

hbase git commit: HBASE-15720 Print row locks at the debug dump page

2016-04-27 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 46dda26df -> 1c92d02b0


HBASE-15720 Print row locks at the debug dump page

Conflicts:
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c92d02b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c92d02b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c92d02b

Branch: refs/heads/branch-1.1
Commit: 1c92d02b001e1bf3c9bf1d9dfe7d094c2c932e75
Parents: 46dda26
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 28 12:18:32 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 28 12:24:18 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 17 +++-
 .../hbase/regionserver/RSDumpServlet.java   | 21 +++-
 2 files changed, 36 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c92d02b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 8dde6cf..d41549c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -265,7 +265,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   
//
   // Members
   
//
-
   // map from a locked row to the context for that lock including:
   // - CountDownLatch for threads waiting on that row
   // - the thread that owns the lock (allow reentrancy)
@@ -4974,6 +4973,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 return this.stores.get(column);
   }
 
+  public ConcurrentHashMap<HashedBytes, RowLockContext> getLockedRows() {
+return lockedRows;
+  }
+
   /**
* Return HStore instance. Does not do any copy: as the number of store is 
limited, we
*  iterate on the list.
@@ -7985,6 +7988,16 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 latch.countDown();
   }
 }
+
+@Override
+public String toString() {
+  return "RowLockContext{" +
+"row=" + row +
+", count=" + lockCount +
+", threadName=" + thread.getName() +
+'}';
+}
+
   }
 
   public static class RowLockImpl implements RowLock {
@@ -8066,4 +8079,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   public RegionSplitPolicy getSplitPolicy() {
 return this.splitPolicy;
   }
+
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1c92d02b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 77b68ec..56d0417 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -23,7 +23,6 @@ import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.util.Date;
-
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -69,6 +68,10 @@ public class RSDumpServlet extends StateDumpServlet {
 out.println(LINE);
 TaskMonitor.get().dumpAsText(out);
 
+out.println("\n\nRowLocks:");
+out.println(LINE);
+dumpRowLock(hrs, out);
+
 out.println("\n\nExecutors:");
 out.println(LINE);
 dumpExecutors(hrs.getExecutorService(), out);
@@ -100,6 +103,22 @@ public class RSDumpServlet extends StateDumpServlet {
 out.flush();
   }
 
+  public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
+StringBuilder sb = new StringBuilder();
+for (Region region : hrs.getOnlineRegionsLocalContext()) {
+  HRegion hRegion = (HRegion)region;
+  if (hRegion.getLockedRows().size() > 0) {
+for (HRegion.RowLockContext rowLockContext : 
hRegion.getLockedRows().values()) {
+  sb.setLength(0);
+  sb.append(hRegion.getTableDesc().getTableName()).append(",")

hbase git commit: HBASE-15720 Print row locks at the debug dump page

2016-04-27 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 ed520133d -> 92eec5382


HBASE-15720 Print row locks at the debug dump page


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/92eec538
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/92eec538
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/92eec538

Branch: refs/heads/branch-1.2
Commit: 92eec5382995e2254ff9db556d0e784559c08b2e
Parents: ed52013
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 28 11:40:29 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 28 11:52:39 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 11 ++
 .../hbase/regionserver/RSDumpServlet.java   | 21 +++-
 2 files changed, 31 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/92eec538/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b5f863c..041c838 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5141,6 +5141,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 rowLockContext.cleanUp();
 throw new IOException("Timed out waiting for lock for row: " + rowKey);
   }
+  rowLockContext.setThreadName(Thread.currentThread().getName());
   return result;
 } catch (InterruptedException ie) {
   LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
@@ -5168,6 +5169,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
   }
 
+  public ConcurrentHashMap<HashedBytes, RowLockContext> getLockedRows() {
+return lockedRows;
+  }
+
   @VisibleForTesting
   class RowLockContext {
 private final HashedBytes row;
@@ -5175,6 +5180,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 final AtomicBoolean usable = new AtomicBoolean(true);
 final AtomicInteger count = new AtomicInteger(0);
 final Object lock = new Object();
+private String threadName;
 
 RowLockContext(HashedBytes row) {
   this.row = row;
@@ -5213,12 +5219,17 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   }
 }
 
+public void setThreadName(String threadName) {
+  this.threadName = threadName;
+}
+
 @Override
 public String toString() {
   return "RowLockContext{" +
   "row=" + row +
   ", readWriteLock=" + readWriteLock +
   ", count=" + count +
+  ", threadName=" + threadName +
   '}';
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/92eec538/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 77b68ec..0c69bd9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -23,7 +23,6 @@ import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.util.Date;
-
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -69,6 +68,10 @@ public class RSDumpServlet extends StateDumpServlet {
 out.println(LINE);
 TaskMonitor.get().dumpAsText(out);
 
+out.println("\n\nRowLocks:");
+out.println(LINE);
+dumpRowLock(hrs, out);
+
 out.println("\n\nExecutors:");
 out.println(LINE);
 dumpExecutors(hrs.getExecutorService(), out);
@@ -100,6 +103,22 @@ public class RSDumpServlet extends StateDumpServlet {
 out.flush();
   }
 
+  public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
+StringBuilder sb = new StringBuilder();
+for (Region region : hrs.getOnlineRegions()) {
+  HRegion hRegion = (HRegion)region;
+  if (hRegion.getLockedRows().size() > 0) {
+for (HRegion.RowLockContext rowLockContext : 
hRegion.getLockedRows().values()) {
+  sb.setLength(0);
+  sb.append(hRegion.getTableDesc().getTableName()).append(",")

hbase git commit: HBASE-15720 Print row locks at the debug dump page

2016-04-27 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 bc44128fc -> 710ad4a16


HBASE-15720 Print row locks at the debug dump page


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/710ad4a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/710ad4a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/710ad4a1

Branch: refs/heads/branch-1.3
Commit: 710ad4a16883bc28f505b4d18fc8c7e38151b855
Parents: bc44128
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 28 11:40:29 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 28 11:50:59 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 11 ++
 .../hbase/regionserver/RSDumpServlet.java   | 21 +++-
 2 files changed, 31 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/710ad4a1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e620c60..a9ca483 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5248,6 +5248,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 rowLockContext.cleanUp();
 throw new IOException("Timed out waiting for lock for row: " + rowKey);
   }
+  rowLockContext.setThreadName(Thread.currentThread().getName());
   return result;
 } catch (InterruptedException ie) {
   LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
@@ -5275,6 +5276,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
   }
 
+  public ConcurrentHashMap<HashedBytes, RowLockContext> getLockedRows() {
+return lockedRows;
+  }
+
   @VisibleForTesting
   class RowLockContext {
 private final HashedBytes row;
@@ -5282,6 +5287,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 final AtomicBoolean usable = new AtomicBoolean(true);
 final AtomicInteger count = new AtomicInteger(0);
 final Object lock = new Object();
+private String threadName;
 
 RowLockContext(HashedBytes row) {
   this.row = row;
@@ -5320,12 +5326,17 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   }
 }
 
+public void setThreadName(String threadName) {
+  this.threadName = threadName;
+}
+
 @Override
 public String toString() {
   return "RowLockContext{" +
   "row=" + row +
   ", readWriteLock=" + readWriteLock +
   ", count=" + count +
+  ", threadName=" + threadName +
   '}';
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/710ad4a1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 77b68ec..0c69bd9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -23,7 +23,6 @@ import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.util.Date;
-
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -69,6 +68,10 @@ public class RSDumpServlet extends StateDumpServlet {
 out.println(LINE);
 TaskMonitor.get().dumpAsText(out);
 
+out.println("\n\nRowLocks:");
+out.println(LINE);
+dumpRowLock(hrs, out);
+
 out.println("\n\nExecutors:");
 out.println(LINE);
 dumpExecutors(hrs.getExecutorService(), out);
@@ -100,6 +103,22 @@ public class RSDumpServlet extends StateDumpServlet {
 out.flush();
   }
 
+  public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
+StringBuilder sb = new StringBuilder();
+for (Region region : hrs.getOnlineRegions()) {
+  HRegion hRegion = (HRegion)region;
+  if (hRegion.getLockedRows().size() > 0) {
+for (HRegion.RowLockContext rowLockContext : 
hRegion.getLockedRows().values()) {
+  sb.setLength(0);
+  sb.append(hRegion.getTableDesc().getTableName()).append(",")

hbase git commit: HBASE-15720 Print row locks at the debug dump page

2016-04-27 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4ba57cb93 -> 5f5cdaae9


HBASE-15720 Print row locks at the debug dump page


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5f5cdaae
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5f5cdaae
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5f5cdaae

Branch: refs/heads/branch-1
Commit: 5f5cdaae9a0b056dabb36b6866ef06b6aaa99190
Parents: 4ba57cb
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 28 11:40:29 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 28 11:46:06 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 11 ++
 .../hbase/regionserver/RSDumpServlet.java   | 21 +++-
 2 files changed, 31 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5f5cdaae/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e620c60..a9ca483 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5248,6 +5248,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 rowLockContext.cleanUp();
 throw new IOException("Timed out waiting for lock for row: " + rowKey);
   }
+  rowLockContext.setThreadName(Thread.currentThread().getName());
   return result;
 } catch (InterruptedException ie) {
   LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
@@ -5275,6 +5276,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
   }
 
+  public ConcurrentHashMap<HashedBytes, RowLockContext> getLockedRows() {
+return lockedRows;
+  }
+
   @VisibleForTesting
   class RowLockContext {
 private final HashedBytes row;
@@ -5282,6 +5287,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 final AtomicBoolean usable = new AtomicBoolean(true);
 final AtomicInteger count = new AtomicInteger(0);
 final Object lock = new Object();
+private String threadName;
 
 RowLockContext(HashedBytes row) {
   this.row = row;
@@ -5320,12 +5326,17 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   }
 }
 
+public void setThreadName(String threadName) {
+  this.threadName = threadName;
+}
+
 @Override
 public String toString() {
   return "RowLockContext{" +
   "row=" + row +
   ", readWriteLock=" + readWriteLock +
   ", count=" + count +
+  ", threadName=" + threadName +
   '}';
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/5f5cdaae/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 77b68ec..0c69bd9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -23,7 +23,6 @@ import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.util.Date;
-
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -69,6 +68,10 @@ public class RSDumpServlet extends StateDumpServlet {
 out.println(LINE);
 TaskMonitor.get().dumpAsText(out);
 
+out.println("\n\nRowLocks:");
+out.println(LINE);
+dumpRowLock(hrs, out);
+
 out.println("\n\nExecutors:");
 out.println(LINE);
 dumpExecutors(hrs.getExecutorService(), out);
@@ -100,6 +103,22 @@ public class RSDumpServlet extends StateDumpServlet {
 out.flush();
   }
 
+  public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
+StringBuilder sb = new StringBuilder();
+for (Region region : hrs.getOnlineRegions()) {
+  HRegion hRegion = (HRegion)region;
+  if (hRegion.getLockedRows().size() > 0) {
+for (HRegion.RowLockContext rowLockContext : 
hRegion.getLockedRows().values()) {
+  sb.setLength(0);
+  sb.append(hRegion.getTableDesc().getTableName()).append(",")

hbase git commit: HBASE-15720 Print row locks at the debug dump page

2016-04-27 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 3d4f02670 -> e8b37422c


HBASE-15720 Print row locks at the debug dump page


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e8b37422
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e8b37422
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e8b37422

Branch: refs/heads/master
Commit: e8b37422c3217bf6794607e3d88d01b99321eda5
Parents: 3d4f026
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 28 11:40:29 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 28 11:40:29 2016 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 11 ++
 .../hbase/regionserver/RSDumpServlet.java   | 21 +++-
 2 files changed, 31 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e8b37422/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 72238cc..1210253 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5167,6 +5167,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 rowLockContext.cleanUp();
 throw new IOException("Timed out waiting for lock for row: " + rowKey);
   }
+  rowLockContext.setThreadName(Thread.currentThread().getName());
   return result;
 } catch (InterruptedException ie) {
   LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
@@ -5194,6 +5195,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
   }
 
+  public ConcurrentHashMap<HashedBytes, RowLockContext> getLockedRows() {
+return lockedRows;
+  }
+
   @VisibleForTesting
   class RowLockContext {
 private final HashedBytes row;
@@ -5201,6 +5206,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 final AtomicBoolean usable = new AtomicBoolean(true);
 final AtomicInteger count = new AtomicInteger(0);
 final Object lock = new Object();
+private String threadName;
 
 RowLockContext(HashedBytes row) {
   this.row = row;
@@ -5239,12 +5245,17 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   }
 }
 
+public void setThreadName(String threadName) {
+  this.threadName = threadName;
+}
+
 @Override
 public String toString() {
   return "RowLockContext{" +
   "row=" + row +
   ", readWriteLock=" + readWriteLock +
   ", count=" + count +
+  ", threadName=" + threadName +
   '}';
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e8b37422/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
index 77b68ec..0c69bd9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
@@ -23,7 +23,6 @@ import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.util.Date;
-
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -69,6 +68,10 @@ public class RSDumpServlet extends StateDumpServlet {
 out.println(LINE);
 TaskMonitor.get().dumpAsText(out);
 
+out.println("\n\nRowLocks:");
+out.println(LINE);
+dumpRowLock(hrs, out);
+
 out.println("\n\nExecutors:");
 out.println(LINE);
 dumpExecutors(hrs.getExecutorService(), out);
@@ -100,6 +103,22 @@ public class RSDumpServlet extends StateDumpServlet {
 out.flush();
   }
 
+  public static void dumpRowLock(HRegionServer hrs, PrintWriter out) {
+StringBuilder sb = new StringBuilder();
+for (Region region : hrs.getOnlineRegions()) {
+  HRegion hRegion = (HRegion)region;
+  if (hRegion.getLockedRows().size() > 0) {
+for (HRegion.RowLockContext rowLockContext : 
hRegion.getLockedRows().values()) {
+  sb.setLength(0);
+  sb.append(hRegion.getTableDesc().getTableName()).append(",")

hbase git commit: HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

2016-04-21 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 52f8ad902 -> a74c495f3


HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a74c495f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a74c495f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a74c495f

Branch: refs/heads/branch-1.0
Commit: a74c495f32ff5446377b2b5d6d4d190ca2f00457
Parents: 52f8ad9
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 21 14:16:06 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 21 15:00:27 2016 +0800

--
 .../hadoop/hbase/client/ClusterConnection.java  |  3 +++
 .../hadoop/hbase/client/ConnectionAdapter.java  |  6 ++
 .../hadoop/hbase/client/ConnectionManager.java  |  3 ++-
 .../apache/hadoop/hbase/client/ConnectionUtils.java |  1 +
 .../java/org/apache/hadoop/hbase/client/HTable.java |  3 +++
 .../hadoop/hbase/client/TestFromClientSide.java | 16 +++-
 6 files changed, 30 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a74c495f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 9ceb112..40c4462 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -83,6 +83,9 @@ public interface ClusterConnection extends HConnection {
   @Override
   void clearRegionCache();
 
+
+  void cacheLocation(final TableName tableName, final RegionLocations 
location);
+
   /**
* Allows flushing the region cache of all locations that pertain to
* tableName

http://git-wip-us.apache.org/repos/asf/hbase/blob/a74c495f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index d67df2a..fd56692 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -238,6 +238,12 @@ abstract class ConnectionAdapter implements 
ClusterConnection {
 wrappedConnection.clearRegionCache(tableName);
   }
 
+
+  @Override
+  public void cacheLocation(TableName tableName, RegionLocations location) {
+wrappedConnection.cacheLocation(tableName, location);
+  }
+
   @Override
   public void deleteCachedRegionLocation(HRegionLocation location) {
 wrappedConnection.deleteCachedRegionLocation(location);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a74c495f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 78fb17f..ede3157 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -1296,7 +1296,8 @@ class ConnectionManager {
  * @param tableName The table name.
  * @param location the new location
  */
-private void cacheLocation(final TableName tableName, final 
RegionLocations location) {
+@Override
+public void cacheLocation(final TableName tableName, final RegionLocations 
location) {
   metaCache.cacheLocation(tableName, location);
 }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a74c495f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index b939b17..cd6b0e0 100644
--- 
a/hbase-client/src/main/java/org/apache/hado

hbase git commit: HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

2016-04-21 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 1bca4820f -> aef2d17e7


HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aef2d17e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aef2d17e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aef2d17e

Branch: refs/heads/branch-1.1
Commit: aef2d17e7dcc0887afd94692eedb3871a25d7c0a
Parents: 1bca482
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 21 14:16:06 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 21 14:41:48 2016 +0800

--
 .../hadoop/hbase/client/ClusterConnection.java |  3 +++
 .../hadoop/hbase/client/ConnectionAdapter.java |  6 ++
 .../hadoop/hbase/client/ConnectionManager.java |  3 ++-
 .../hadoop/hbase/client/ConnectionUtils.java   |  1 +
 .../apache/hadoop/hbase/client/HRegionLocator.java |  6 +-
 .../hadoop/hbase/client/TestFromClientSide.java| 17 +
 6 files changed, 34 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aef2d17e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 9debbcf..eb3f0f2 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -83,6 +83,9 @@ public interface ClusterConnection extends HConnection {
   @Override
   void clearRegionCache();
 
+
+  void cacheLocation(final TableName tableName, final RegionLocations 
location);
+
   /**
* Allows flushing the region cache of all locations that pertain to
* tableName

http://git-wip-us.apache.org/repos/asf/hbase/blob/aef2d17e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index a549a18..033dbb6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -239,6 +239,12 @@ abstract class ConnectionAdapter implements 
ClusterConnection {
 wrappedConnection.clearRegionCache(tableName);
   }
 
+
+  @Override
+  public void cacheLocation(TableName tableName, RegionLocations location) {
+wrappedConnection.cacheLocation(tableName, location);
+  }
+
   @Override
   public void deleteCachedRegionLocation(HRegionLocation location) {
 wrappedConnection.deleteCachedRegionLocation(location);

http://git-wip-us.apache.org/repos/asf/hbase/blob/aef2d17e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index c54eafd..54b7d0e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -1346,7 +1346,8 @@ class ConnectionManager {
  * @param tableName The table name.
  * @param location the new location
  */
-private void cacheLocation(final TableName tableName, final 
RegionLocations location) {
+@Override
+public void cacheLocation(final TableName tableName, final RegionLocations 
location) {
   metaCache.cacheLocation(tableName, location);
 }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/aef2d17e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 647295e..63861be 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Conne

hbase git commit: HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

2016-04-21 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 adfd8bb48 -> 3af736a95


HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3af736a9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3af736a9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3af736a9

Branch: refs/heads/branch-1.2
Commit: 3af736a95ddca817dedb12aaf7e7d9f6a4e97746
Parents: adfd8bb
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 21 14:16:06 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 21 14:34:12 2016 +0800

--
 .../hadoop/hbase/client/ClusterConnection.java |  3 +++
 .../hadoop/hbase/client/ConnectionAdapter.java |  6 ++
 .../hadoop/hbase/client/ConnectionManager.java |  3 ++-
 .../hadoop/hbase/client/ConnectionUtils.java   |  1 +
 .../apache/hadoop/hbase/client/HRegionLocator.java |  6 +-
 .../hadoop/hbase/client/TestFromClientSide.java| 17 +
 6 files changed, 34 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3af736a9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index f4d464f..ec7f471 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -83,6 +83,9 @@ public interface ClusterConnection extends HConnection {
   @Override
   void clearRegionCache();
 
+
+  void cacheLocation(final TableName tableName, final RegionLocations 
location);
+
   /**
* Allows flushing the region cache of all locations that pertain to
* tableName

http://git-wip-us.apache.org/repos/asf/hbase/blob/3af736a9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 040fa6f..4e3e55e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -244,6 +244,12 @@ abstract class ConnectionAdapter implements 
ClusterConnection {
 wrappedConnection.clearRegionCache(tableName);
   }
 
+
+  @Override
+  public void cacheLocation(TableName tableName, RegionLocations location) {
+wrappedConnection.cacheLocation(tableName, location);
+  }
+
   @Override
   public void deleteCachedRegionLocation(HRegionLocation location) {
 wrappedConnection.deleteCachedRegionLocation(location);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3af736a9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 46b4fdf..ffef9b6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -1371,7 +1371,8 @@ class ConnectionManager {
  * @param tableName The table name.
  * @param location the new location
  */
-private void cacheLocation(final TableName tableName, final 
RegionLocations location) {
+@Override
+public void cacheLocation(final TableName tableName, final RegionLocations 
location) {
   metaCache.cacheLocation(tableName, location);
 }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3af736a9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 647295e..63861be 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Conne

hbase git commit: HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

2016-04-21 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 c4d63d5c4 -> 5456b0d74


HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5456b0d7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5456b0d7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5456b0d7

Branch: refs/heads/branch-1.3
Commit: 5456b0d7442bbb35f31363ad9f79fb8226742ddc
Parents: c4d63d5
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 21 14:16:06 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 21 14:32:45 2016 +0800

--
 .../hadoop/hbase/client/ClusterConnection.java |  3 +++
 .../hadoop/hbase/client/ConnectionAdapter.java |  6 ++
 .../hadoop/hbase/client/ConnectionManager.java |  3 ++-
 .../hadoop/hbase/client/ConnectionUtils.java   |  1 +
 .../apache/hadoop/hbase/client/HRegionLocator.java |  6 +-
 .../hadoop/hbase/client/TestFromClientSide.java| 17 +
 6 files changed, 34 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5456b0d7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index f4d464f..ec7f471 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -83,6 +83,9 @@ public interface ClusterConnection extends HConnection {
   @Override
   void clearRegionCache();
 
+
+  void cacheLocation(final TableName tableName, final RegionLocations 
location);
+
   /**
* Allows flushing the region cache of all locations that pertain to
* tableName

http://git-wip-us.apache.org/repos/asf/hbase/blob/5456b0d7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 040fa6f..4e3e55e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -244,6 +244,12 @@ abstract class ConnectionAdapter implements 
ClusterConnection {
 wrappedConnection.clearRegionCache(tableName);
   }
 
+
+  @Override
+  public void cacheLocation(TableName tableName, RegionLocations location) {
+wrappedConnection.cacheLocation(tableName, location);
+  }
+
   @Override
   public void deleteCachedRegionLocation(HRegionLocation location) {
 wrappedConnection.deleteCachedRegionLocation(location);

http://git-wip-us.apache.org/repos/asf/hbase/blob/5456b0d7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 15e0a39..a000a41 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -1373,7 +1373,8 @@ class ConnectionManager {
  * @param tableName The table name.
  * @param location the new location
  */
-private void cacheLocation(final TableName tableName, final 
RegionLocations location) {
+@Override
+public void cacheLocation(final TableName tableName, final RegionLocations 
location) {
   metaCache.cacheLocation(tableName, location);
 }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5456b0d7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 647295e..63861be 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Conne

hbase git commit: HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

2016-04-21 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 80e324cdc -> 5d497d5ba


HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5d497d5b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5d497d5b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5d497d5b

Branch: refs/heads/branch-1
Commit: 5d497d5ba80d130b7071cbf2178183fe8eb0eb0b
Parents: 80e324c
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 21 14:16:06 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 21 14:28:07 2016 +0800

--
 .../hadoop/hbase/client/ClusterConnection.java |  3 +++
 .../hadoop/hbase/client/ConnectionAdapter.java |  6 ++
 .../hadoop/hbase/client/ConnectionManager.java |  3 ++-
 .../hadoop/hbase/client/ConnectionUtils.java   |  1 +
 .../apache/hadoop/hbase/client/HRegionLocator.java |  6 +-
 .../hadoop/hbase/client/TestFromClientSide.java| 17 +
 6 files changed, 34 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5d497d5b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index f4d464f..ec7f471 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -83,6 +83,9 @@ public interface ClusterConnection extends HConnection {
   @Override
   void clearRegionCache();
 
+
+  void cacheLocation(final TableName tableName, final RegionLocations 
location);
+
   /**
* Allows flushing the region cache of all locations that pertain to
* tableName

http://git-wip-us.apache.org/repos/asf/hbase/blob/5d497d5b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 040fa6f..4e3e55e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -244,6 +244,12 @@ abstract class ConnectionAdapter implements 
ClusterConnection {
 wrappedConnection.clearRegionCache(tableName);
   }
 
+
+  @Override
+  public void cacheLocation(TableName tableName, RegionLocations location) {
+wrappedConnection.cacheLocation(tableName, location);
+  }
+
   @Override
   public void deleteCachedRegionLocation(HRegionLocation location) {
 wrappedConnection.deleteCachedRegionLocation(location);

http://git-wip-us.apache.org/repos/asf/hbase/blob/5d497d5b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 15e0a39..a000a41 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -1373,7 +1373,8 @@ class ConnectionManager {
  * @param tableName The table name.
  * @param location the new location
  */
-private void cacheLocation(final TableName tableName, final 
RegionLocations location) {
+@Override
+public void cacheLocation(final TableName tableName, final RegionLocations 
location) {
   metaCache.cacheLocation(tableName, location);
 }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5d497d5b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 647295e..63861be 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Conne

hbase git commit: HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache

2016-04-21 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 58f175f0e -> 03f3c392a


HBASE-15674 HRegionLocator#getAllRegionLocations should put the results in cache


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/03f3c392
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/03f3c392
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/03f3c392

Branch: refs/heads/master
Commit: 03f3c392a3ae940f26ddef56e6991a07f2c993f7
Parents: 58f175f
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 21 14:16:06 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Thu Apr 21 14:19:39 2016 +0800

--
 .../hadoop/hbase/client/ClusterConnection.java  |  3 +++
 .../hbase/client/ConnectionImplementation.java  |  3 ++-
 .../apache/hadoop/hbase/client/HRegionLocator.java  |  7 ++-
 .../hadoop/hbase/client/TestFromClientSide.java | 16 
 4 files changed, 27 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/03f3c392/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index d348ffc..3027761 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -86,6 +86,9 @@ public interface ClusterConnection extends HConnection {
   @Override
   void clearRegionCache();
 
+
+  void cacheLocation(final TableName tableName, final RegionLocations 
location);
+
   /**
* Allows flushing the region cache of all locations that pertain to
* tableName

http://git-wip-us.apache.org/repos/asf/hbase/blob/03f3c392/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 21e7e51..9a7dfc7 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -927,7 +927,8 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
* @param tableName The table name.
* @param location the new location
*/
-  private void cacheLocation(final TableName tableName, final RegionLocations 
location) {
+  @Override
+  public void cacheLocation(final TableName tableName, final RegionLocations 
location) {
 metaCache.cacheLocation(tableName, location);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/03f3c392/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java
index 782ab66..4d2311d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java
@@ -83,11 +83,16 @@ public class HRegionLocator implements RegionLocator {
 
   @Override
   public List getAllRegionLocations() throws IOException {
+TableName tableName = getName();
 List<Pair<HRegionInfo, ServerName>> locations =
-MetaTableAccessor.getTableRegionsAndLocations(this.connection, 
getName());
+MetaTableAccessor.getTableRegionsAndLocations(this.connection, 
tableName);
 ArrayList regions = new ArrayList<>(locations.size());
 for (Pair<HRegionInfo, ServerName> entry : locations) {
   regions.add(new HRegionLocation(entry.getFirst(), entry.getSecond()));
+
+}
+if (regions.size() > 0) {
+  connection.cacheLocation(tableName, new RegionLocations(regions));
 }
 return regions;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/03f3c392/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 520f210..3549791 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.

[2/3] hbase git commit: HBASE-15406 Split / merge switch left disabled after early termination of hbck

2016-04-15 Thread chenheng
http://git-wip-us.apache.org/repos/asf/hbase/blob/438739dc/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 588cc86..39619c4 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -28114,6 +28114,16 @@ public final class MasterProtos {
  * repeated .hbase.pb.MasterSwitchType switch_types = 3;
  */
 org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType 
getSwitchTypes(int index);
+
+// optional bool skip_lock = 4;
+/**
+ * optional bool skip_lock = 4;
+ */
+boolean hasSkipLock();
+/**
+ * optional bool skip_lock = 4;
+ */
+boolean getSkipLock();
   }
   /**
* Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest}
@@ -28209,6 +28219,11 @@ public final class MasterProtos {
   input.popLimit(oldLimit);
   break;
 }
+case 32: {
+  bitField0_ |= 0x0004;
+  skipLock_ = input.readBool();
+  break;
+}
   }
 }
   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -28306,10 +28321,27 @@ public final class MasterProtos {
   return switchTypes_.get(index);
 }
 
+// optional bool skip_lock = 4;
+public static final int SKIP_LOCK_FIELD_NUMBER = 4;
+private boolean skipLock_;
+/**
+ * optional bool skip_lock = 4;
+ */
+public boolean hasSkipLock() {
+  return ((bitField0_ & 0x0004) == 0x0004);
+}
+/**
+ * optional bool skip_lock = 4;
+ */
+public boolean getSkipLock() {
+  return skipLock_;
+}
+
 private void initFields() {
   enabled_ = false;
   synchronous_ = false;
   switchTypes_ = java.util.Collections.emptyList();
+  skipLock_ = false;
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -28336,6 +28368,9 @@ public final class MasterProtos {
   for (int i = 0; i < switchTypes_.size(); i++) {
 output.writeEnum(3, switchTypes_.get(i).getNumber());
   }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+output.writeBool(4, skipLock_);
+  }
   getUnknownFields().writeTo(output);
 }
 
@@ -28362,6 +28397,10 @@ public final class MasterProtos {
 size += dataSize;
 size += 1 * switchTypes_.size();
   }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+size += com.google.protobuf.CodedOutputStream
+  .computeBoolSize(4, skipLock_);
+  }
   size += getUnknownFields().getSerializedSize();
   memoizedSerializedSize = size;
   return size;
@@ -28397,6 +28436,11 @@ public final class MasterProtos {
   }
   result = result && getSwitchTypesList()
   .equals(other.getSwitchTypesList());
+  result = result && (hasSkipLock() == other.hasSkipLock());
+  if (hasSkipLock()) {
+result = result && (getSkipLock()
+== other.getSkipLock());
+  }
   result = result &&
   getUnknownFields().equals(other.getUnknownFields());
   return result;
@@ -28422,6 +28466,10 @@ public final class MasterProtos {
 hash = (37 * hash) + SWITCH_TYPES_FIELD_NUMBER;
 hash = (53 * hash) + hashEnumList(getSwitchTypesList());
   }
+  if (hasSkipLock()) {
+hash = (37 * hash) + SKIP_LOCK_FIELD_NUMBER;
+hash = (53 * hash) + hashBoolean(getSkipLock());
+  }
   hash = (29 * hash) + getUnknownFields().hashCode();
   memoizedHashCode = hash;
   return hash;
@@ -28537,6 +28585,8 @@ public final class MasterProtos {
 bitField0_ = (bitField0_ & ~0x0002);
 switchTypes_ = java.util.Collections.emptyList();
 bitField0_ = (bitField0_ & ~0x0004);
+skipLock_ = false;
+bitField0_ = (bitField0_ & ~0x0008);
 return this;
   }
 
@@ -28578,6 +28628,10 @@ public final class MasterProtos {
   bitField0_ = (bitField0_ & ~0x0004);
 }
 result.switchTypes_ = switchTypes_;
+if (((from_bitField0_ & 0x0008) == 0x0008)) {
+  to_bitField0_ |= 0x0004;
+}
+result.skipLock_ = skipLock_;
 result.bitField0_ = to_bitField0_;
 onBuilt();
 return result;
@@ -28610,6 +28664,9 @@ public final class MasterProtos {
   }
   onChanged();
 }
+if (other.hasSkipLock()) {
+  setSkipLock(other.getSkipLock());
+}
 this.mergeUnknownFields(other.getUnknownFields());
 return 

[3/3] hbase git commit: HBASE-15406 Split / merge switch left disabled after early termination of hbck

2016-04-15 Thread chenheng
HBASE-15406 Split / merge switch left disabled after early termination of hbck

Conflicts:
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java

hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java

hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java

hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java

hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/438739dc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/438739dc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/438739dc

Branch: refs/heads/branch-1.3
Commit: 438739dc15ac17c7d0f8b85fa42aceaa33c5b6cc
Parents: 6d2dc2a
Author: chenheng <chenh...@apache.org>
Authored: Fri Apr 15 15:24:20 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Fri Apr 15 15:36:40 2016 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   13 +
 .../hadoop/hbase/client/ConnectionManager.java  |7 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   18 +-
 .../hadoop/hbase/protobuf/RequestConverter.java |   11 +-
 .../hbase/zookeeper/ZooKeeperWatcher.java   |   10 +
 .../hbase/protobuf/generated/MasterProtos.java  | 1891 +-
 .../protobuf/generated/ZooKeeperProtos.java |  553 -
 hbase-protocol/src/main/protobuf/Master.proto   |   13 +
 .../src/main/protobuf/ZooKeeper.proto   |   10 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   24 +
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  160 +-
 .../hbase/zookeeper/SplitOrMergeTracker.java|   85 +
 .../hbase/client/TestSplitOrMergeStatus.java|   39 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java |   59 +-
 hbase-shell/src/main/ruby/hbase/admin.rb|2 +-
 15 files changed, 2273 insertions(+), 622 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/438739dc/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index d9361b3..8fd855c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1490,10 +1490,15 @@ public interface Admin extends Abortable, Closeable {
*
* @param enabled enabled or not
* @param synchronous If true, it waits until current split() call, if 
outstanding, to return.
+   * @param skipLock if false, we will do lock before change switch.
+   * with the lock, other requests to change the switch will 
be rejected!
+   * And when you set it to be false,
+   * you should call {@link 
#releaseSplitOrMergeLockAndRollback()} by yourself
* @param switchTypes switchType list {@link MasterSwitchType}
* @return Previous switch value array
*/
   boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean 
synchronous,
+   final boolean skipLock,
final MasterSwitchType... switchTypes) 
throws IOException;
 
   /**
@@ -1503,6 +1508,14 @@ public interface Admin extends Abortable, Closeable {
*/
   boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws 
IOException;
 
+  /**
+   *  You should call this method after you call
+   *  {@link #setSplitOrMergeEnabled(boolean, boolean, boolean, 
MasterSwitchType...)}
+   *  with skipLock be false, this method will release the lock created by 
above method
+   *  and rollback the switch state to be original state before you change 
switch
+   * */
+  void releaseSplitOrMergeLockAndRollback() throws IOException;
+
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public enum MasterSwitchType {

http://git-wip-us.apache.org/repos/asf/hbase/blob/438739dc/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/

[1/3] hbase git commit: HBASE-15406 Split / merge switch left disabled after early termination of hbck

2016-04-15 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 6d2dc2a8b -> 438739dc1


http://git-wip-us.apache.org/repos/asf/hbase/blob/438739dc/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 09479c4..b0a844a 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -9725,6 +9725,540 @@ public final class ZooKeeperProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.SwitchState)
   }
 
+  public interface SplitAndMergeStateOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional bool split_enabled = 1;
+/**
+ * optional bool split_enabled = 1;
+ */
+boolean hasSplitEnabled();
+/**
+ * optional bool split_enabled = 1;
+ */
+boolean getSplitEnabled();
+
+// optional bool merge_enabled = 2;
+/**
+ * optional bool merge_enabled = 2;
+ */
+boolean hasMergeEnabled();
+/**
+ * optional bool merge_enabled = 2;
+ */
+boolean getMergeEnabled();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SplitAndMergeState}
+   *
+   * 
+   **
+   * State for split and merge, used in hbck
+   * 
+   */
+  public static final class SplitAndMergeState extends
+  com.google.protobuf.GeneratedMessage
+  implements SplitAndMergeStateOrBuilder {
+// Use SplitAndMergeState.newBuilder() to construct.
+private SplitAndMergeState(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private SplitAndMergeState(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SplitAndMergeState defaultInstance;
+public static SplitAndMergeState getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SplitAndMergeState getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private SplitAndMergeState(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 8: {
+  bitField0_ |= 0x0001;
+  splitEnabled_ = input.readBool();
+  break;
+}
+case 16: {
+  bitField0_ |= 0x0002;
+  mergeEnabled_ = input.readBool();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class,
 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class);
+}
+
+public static com.google.protobuf.Parser PARSER =
+new com.google.protobuf.AbstractParser() 

[1/3] hbase git commit: HBASE-15406 Split / merge switch left disabled after early termination of hbck

2016-04-15 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 d37897535 -> 96e9c466d


http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 09479c4..b0a844a 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -9725,6 +9725,540 @@ public final class ZooKeeperProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.SwitchState)
   }
 
+  public interface SplitAndMergeStateOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional bool split_enabled = 1;
+/**
+ * optional bool split_enabled = 1;
+ */
+boolean hasSplitEnabled();
+/**
+ * optional bool split_enabled = 1;
+ */
+boolean getSplitEnabled();
+
+// optional bool merge_enabled = 2;
+/**
+ * optional bool merge_enabled = 2;
+ */
+boolean hasMergeEnabled();
+/**
+ * optional bool merge_enabled = 2;
+ */
+boolean getMergeEnabled();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SplitAndMergeState}
+   *
+   * 
+   **
+   * State for split and merge, used in hbck
+   * 
+   */
+  public static final class SplitAndMergeState extends
+  com.google.protobuf.GeneratedMessage
+  implements SplitAndMergeStateOrBuilder {
+// Use SplitAndMergeState.newBuilder() to construct.
+private SplitAndMergeState(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private SplitAndMergeState(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SplitAndMergeState defaultInstance;
+public static SplitAndMergeState getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SplitAndMergeState getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private SplitAndMergeState(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 8: {
+  bitField0_ |= 0x0001;
+  splitEnabled_ = input.readBool();
+  break;
+}
+case 16: {
+  bitField0_ |= 0x0002;
+  mergeEnabled_ = input.readBool();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class,
 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class);
+}
+
+public static com.google.protobuf.Parser PARSER =
+new com.google.protobuf.AbstractParser() {

[2/3] hbase git commit: HBASE-15406 Split / merge switch left disabled after early termination of hbck

2016-04-15 Thread chenheng
http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 588cc86..39619c4 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -28114,6 +28114,16 @@ public final class MasterProtos {
  * repeated .hbase.pb.MasterSwitchType switch_types = 3;
  */
 org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType 
getSwitchTypes(int index);
+
+// optional bool skip_lock = 4;
+/**
+ * optional bool skip_lock = 4;
+ */
+boolean hasSkipLock();
+/**
+ * optional bool skip_lock = 4;
+ */
+boolean getSkipLock();
   }
   /**
* Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest}
@@ -28209,6 +28219,11 @@ public final class MasterProtos {
   input.popLimit(oldLimit);
   break;
 }
+case 32: {
+  bitField0_ |= 0x0004;
+  skipLock_ = input.readBool();
+  break;
+}
   }
 }
   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -28306,10 +28321,27 @@ public final class MasterProtos {
   return switchTypes_.get(index);
 }
 
+// optional bool skip_lock = 4;
+public static final int SKIP_LOCK_FIELD_NUMBER = 4;
+private boolean skipLock_;
+/**
+ * optional bool skip_lock = 4;
+ */
+public boolean hasSkipLock() {
+  return ((bitField0_ & 0x0004) == 0x0004);
+}
+/**
+ * optional bool skip_lock = 4;
+ */
+public boolean getSkipLock() {
+  return skipLock_;
+}
+
 private void initFields() {
   enabled_ = false;
   synchronous_ = false;
   switchTypes_ = java.util.Collections.emptyList();
+  skipLock_ = false;
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -28336,6 +28368,9 @@ public final class MasterProtos {
   for (int i = 0; i < switchTypes_.size(); i++) {
 output.writeEnum(3, switchTypes_.get(i).getNumber());
   }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+output.writeBool(4, skipLock_);
+  }
   getUnknownFields().writeTo(output);
 }
 
@@ -28362,6 +28397,10 @@ public final class MasterProtos {
 size += dataSize;
 size += 1 * switchTypes_.size();
   }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+size += com.google.protobuf.CodedOutputStream
+  .computeBoolSize(4, skipLock_);
+  }
   size += getUnknownFields().getSerializedSize();
   memoizedSerializedSize = size;
   return size;
@@ -28397,6 +28436,11 @@ public final class MasterProtos {
   }
   result = result && getSwitchTypesList()
   .equals(other.getSwitchTypesList());
+  result = result && (hasSkipLock() == other.hasSkipLock());
+  if (hasSkipLock()) {
+result = result && (getSkipLock()
+== other.getSkipLock());
+  }
   result = result &&
   getUnknownFields().equals(other.getUnknownFields());
   return result;
@@ -28422,6 +28466,10 @@ public final class MasterProtos {
 hash = (37 * hash) + SWITCH_TYPES_FIELD_NUMBER;
 hash = (53 * hash) + hashEnumList(getSwitchTypesList());
   }
+  if (hasSkipLock()) {
+hash = (37 * hash) + SKIP_LOCK_FIELD_NUMBER;
+hash = (53 * hash) + hashBoolean(getSkipLock());
+  }
   hash = (29 * hash) + getUnknownFields().hashCode();
   memoizedHashCode = hash;
   return hash;
@@ -28537,6 +28585,8 @@ public final class MasterProtos {
 bitField0_ = (bitField0_ & ~0x0002);
 switchTypes_ = java.util.Collections.emptyList();
 bitField0_ = (bitField0_ & ~0x0004);
+skipLock_ = false;
+bitField0_ = (bitField0_ & ~0x0008);
 return this;
   }
 
@@ -28578,6 +28628,10 @@ public final class MasterProtos {
   bitField0_ = (bitField0_ & ~0x0004);
 }
 result.switchTypes_ = switchTypes_;
+if (((from_bitField0_ & 0x0008) == 0x0008)) {
+  to_bitField0_ |= 0x0004;
+}
+result.skipLock_ = skipLock_;
 result.bitField0_ = to_bitField0_;
 onBuilt();
 return result;
@@ -28610,6 +28664,9 @@ public final class MasterProtos {
   }
   onChanged();
 }
+if (other.hasSkipLock()) {
+  setSkipLock(other.getSkipLock());
+}
 this.mergeUnknownFields(other.getUnknownFields());
 return 

[3/3] hbase git commit: HBASE-15406 Split / merge switch left disabled after early termination of hbck

2016-04-15 Thread chenheng
HBASE-15406 Split / merge switch left disabled after early termination of hbck

Conflicts:
hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java

hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java

hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java

hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java

hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java

hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/96e9c466
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/96e9c466
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/96e9c466

Branch: refs/heads/branch-1
Commit: 96e9c466d2d69cc1d0ea4d64d0ed2bada2ec3391
Parents: d378975
Author: chenheng <chenh...@apache.org>
Authored: Fri Apr 15 15:24:20 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Fri Apr 15 15:34:43 2016 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   13 +
 .../hadoop/hbase/client/ConnectionManager.java  |7 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   18 +-
 .../hadoop/hbase/protobuf/RequestConverter.java |   11 +-
 .../hbase/zookeeper/ZooKeeperWatcher.java   |   10 +
 .../hbase/protobuf/generated/MasterProtos.java  | 1891 +-
 .../protobuf/generated/ZooKeeperProtos.java |  553 -
 hbase-protocol/src/main/protobuf/Master.proto   |   13 +
 .../src/main/protobuf/ZooKeeper.proto   |   10 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   24 +
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  160 +-
 .../hbase/zookeeper/SplitOrMergeTracker.java|   85 +
 .../hbase/client/TestSplitOrMergeStatus.java|   39 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java |   59 +-
 hbase-shell/src/main/ruby/hbase/admin.rb|2 +-
 15 files changed, 2273 insertions(+), 622 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index d9361b3..8fd855c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1490,10 +1490,15 @@ public interface Admin extends Abortable, Closeable {
*
* @param enabled enabled or not
* @param synchronous If true, it waits until current split() call, if 
outstanding, to return.
+   * @param skipLock if false, we will do lock before change switch.
+   * with the lock, other requests to change the switch will 
be rejected!
+   * And when you set it to be false,
+   * you should call {@link 
#releaseSplitOrMergeLockAndRollback()} by yourself
* @param switchTypes switchType list {@link MasterSwitchType}
* @return Previous switch value array
*/
   boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean 
synchronous,
+   final boolean skipLock,
final MasterSwitchType... switchTypes) 
throws IOException;
 
   /**
@@ -1503,6 +1508,14 @@ public interface Admin extends Abortable, Closeable {
*/
   boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws 
IOException;
 
+  /**
+   *  You should call this method after you call
+   *  {@link #setSplitOrMergeEnabled(boolean, boolean, boolean, 
MasterSwitchType...)}
+   *  with skipLock be false, this method will release the lock created by 
above method
+   *  and rollback the switch state to be original state before you change 
switch
+   * */
+  void releaseSplitOrMergeLockAndRollback() throws IOException;
+
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public enum MasterSwitchType {

http://git-wip-us.apache.org/repos/asf/hbase/blob/96e9c466/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/

[3/3] hbase git commit: HBASE-15406 Split / merge switch left disabled after early termination of hbck

2016-04-15 Thread chenheng
HBASE-15406 Split / merge switch left disabled after early termination of hbck


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a9f1c4a8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a9f1c4a8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a9f1c4a8

Branch: refs/heads/master
Commit: a9f1c4a87153002629c291312bac4b758ad58204
Parents: 7efb9ed
Author: chenheng <chenh...@apache.org>
Authored: Thu Apr 7 14:25:18 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Fri Apr 15 14:09:52 2016 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   13 +
 .../hbase/client/ConnectionImplementation.java  |7 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   18 +-
 .../hadoop/hbase/protobuf/RequestConverter.java |   11 +-
 .../hbase/zookeeper/ZooKeeperWatcher.java   |   10 +
 .../hbase/protobuf/generated/MasterProtos.java  | 1903 +-
 .../protobuf/generated/ZooKeeperProtos.java |  552 -
 hbase-protocol/src/main/protobuf/Master.proto   |   13 +
 .../src/main/protobuf/ZooKeeper.proto   |   10 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   23 +
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   61 +-
 .../hbase/zookeeper/SplitOrMergeTracker.java|   85 +
 .../hbase/client/TestSplitOrMergeStatus.java|   39 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |   55 +
 hbase-shell/src/main/ruby/hbase/admin.rb|2 +-
 15 files changed, 2235 insertions(+), 567 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9f1c4a8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 54d2cb9..97356a2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1716,10 +1716,15 @@ public interface Admin extends Abortable, Closeable {
*
* @param enabled enabled or not
* @param synchronous If true, it waits until current split() call, if 
outstanding, to return.
+   * @param skipLock if false, we will do lock before change switch.
+   * with the lock, other requests to change the switch will 
be rejected!
+   * And when you set it to be false,
+   * you should call {@link 
#releaseSplitOrMergeLockAndRollback()} by yourself
* @param switchTypes switchType list {@link MasterSwitchType}
* @return Previous switch value array
*/
   boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean 
synchronous,
+   final boolean skipLock,
final MasterSwitchType... switchTypes) 
throws IOException;
 
   /**
@@ -1730,6 +1735,14 @@ public interface Admin extends Abortable, Closeable {
   boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws 
IOException;
 
   /**
+   *  You should call this method after you call
+   *  {@link #setSplitOrMergeEnabled(boolean, boolean, boolean, 
MasterSwitchType...)}
+   *  with skipLock be false, this method will release the lock created by 
above method
+   *  and rollback the switch state to be original state before you change 
switch
+   * */
+  void releaseSplitOrMergeLockAndRollback() throws IOException;
+
+  /**
* Currently, there are only two compact types:
* {@code NORMAL} means do store files compaction;
* {@code MOB} means do mob files compaction.

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9f1c4a8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index e43a712..21e7e51 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1751,6 +1751,13 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
   }
 
   @Override
+  public MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse
+releaseSplitOrMergeLockAndRollback(RpcController controller,
+MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request) throws 
ServiceException {
+return stub.releaseSplitOrMergeLockAndRollback(controller, request);
+  }
+
+  @Override
   public IsNormalizerEna

[1/3] hbase git commit: HBASE-15406 Split / merge switch left disabled after early termination of hbck

2016-04-15 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 7efb9edec -> a9f1c4a87


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9f1c4a8/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index f64d0c1..a45c421 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -9733,6 +9733,540 @@ public final class ZooKeeperProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.SwitchState)
   }
 
+  public interface SplitAndMergeStateOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// optional bool split_enabled = 1;
+/**
+ * optional bool split_enabled = 1;
+ */
+boolean hasSplitEnabled();
+/**
+ * optional bool split_enabled = 1;
+ */
+boolean getSplitEnabled();
+
+// optional bool merge_enabled = 2;
+/**
+ * optional bool merge_enabled = 2;
+ */
+boolean hasMergeEnabled();
+/**
+ * optional bool merge_enabled = 2;
+ */
+boolean getMergeEnabled();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SplitAndMergeState}
+   *
+   * 
+   **
+   * State for split and merge, used in hbck
+   * 
+   */
+  public static final class SplitAndMergeState extends
+  com.google.protobuf.GeneratedMessage
+  implements SplitAndMergeStateOrBuilder {
+// Use SplitAndMergeState.newBuilder() to construct.
+private SplitAndMergeState(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private SplitAndMergeState(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SplitAndMergeState defaultInstance;
+public static SplitAndMergeState getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SplitAndMergeState getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private SplitAndMergeState(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 8: {
+  bitField0_ |= 0x0001;
+  splitEnabled_ = input.readBool();
+  break;
+}
+case 16: {
+  bitField0_ |= 0x0002;
+  mergeEnabled_ = input.readBool();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class,
 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class);
+}
+
+public static com.google.protobuf.Parser PARSER =
+new com.google.protobuf.AbstractParser() {
+  

[2/3] hbase git commit: HBASE-15406 Split / merge switch left disabled after early termination of hbck

2016-04-15 Thread chenheng
http://git-wip-us.apache.org/repos/asf/hbase/blob/a9f1c4a8/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index b91a36b..b4bd7af 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -28882,6 +28882,16 @@ public final class MasterProtos {
  * repeated .hbase.pb.MasterSwitchType switch_types = 3;
  */
 org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType 
getSwitchTypes(int index);
+
+// optional bool skip_lock = 4;
+/**
+ * optional bool skip_lock = 4;
+ */
+boolean hasSkipLock();
+/**
+ * optional bool skip_lock = 4;
+ */
+boolean getSkipLock();
   }
   /**
* Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest}
@@ -28977,6 +28987,11 @@ public final class MasterProtos {
   input.popLimit(oldLimit);
   break;
 }
+case 32: {
+  bitField0_ |= 0x0004;
+  skipLock_ = input.readBool();
+  break;
+}
   }
 }
   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -29074,10 +29089,27 @@ public final class MasterProtos {
   return switchTypes_.get(index);
 }
 
+// optional bool skip_lock = 4;
+public static final int SKIP_LOCK_FIELD_NUMBER = 4;
+private boolean skipLock_;
+/**
+ * optional bool skip_lock = 4;
+ */
+public boolean hasSkipLock() {
+  return ((bitField0_ & 0x0004) == 0x0004);
+}
+/**
+ * optional bool skip_lock = 4;
+ */
+public boolean getSkipLock() {
+  return skipLock_;
+}
+
 private void initFields() {
   enabled_ = false;
   synchronous_ = false;
   switchTypes_ = java.util.Collections.emptyList();
+  skipLock_ = false;
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -29104,6 +29136,9 @@ public final class MasterProtos {
   for (int i = 0; i < switchTypes_.size(); i++) {
 output.writeEnum(3, switchTypes_.get(i).getNumber());
   }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+output.writeBool(4, skipLock_);
+  }
   getUnknownFields().writeTo(output);
 }
 
@@ -29130,6 +29165,10 @@ public final class MasterProtos {
 size += dataSize;
 size += 1 * switchTypes_.size();
   }
+  if (((bitField0_ & 0x0004) == 0x0004)) {
+size += com.google.protobuf.CodedOutputStream
+  .computeBoolSize(4, skipLock_);
+  }
   size += getUnknownFields().getSerializedSize();
   memoizedSerializedSize = size;
   return size;
@@ -29165,6 +29204,11 @@ public final class MasterProtos {
   }
   result = result && getSwitchTypesList()
   .equals(other.getSwitchTypesList());
+  result = result && (hasSkipLock() == other.hasSkipLock());
+  if (hasSkipLock()) {
+result = result && (getSkipLock()
+== other.getSkipLock());
+  }
   result = result &&
   getUnknownFields().equals(other.getUnknownFields());
   return result;
@@ -29190,6 +29234,10 @@ public final class MasterProtos {
 hash = (37 * hash) + SWITCH_TYPES_FIELD_NUMBER;
 hash = (53 * hash) + hashEnumList(getSwitchTypesList());
   }
+  if (hasSkipLock()) {
+hash = (37 * hash) + SKIP_LOCK_FIELD_NUMBER;
+hash = (53 * hash) + hashBoolean(getSkipLock());
+  }
   hash = (29 * hash) + getUnknownFields().hashCode();
   memoizedHashCode = hash;
   return hash;
@@ -29305,6 +29353,8 @@ public final class MasterProtos {
 bitField0_ = (bitField0_ & ~0x0002);
 switchTypes_ = java.util.Collections.emptyList();
 bitField0_ = (bitField0_ & ~0x0004);
+skipLock_ = false;
+bitField0_ = (bitField0_ & ~0x0008);
 return this;
   }
 
@@ -29346,6 +29396,10 @@ public final class MasterProtos {
   bitField0_ = (bitField0_ & ~0x0004);
 }
 result.switchTypes_ = switchTypes_;
+if (((from_bitField0_ & 0x0008) == 0x0008)) {
+  to_bitField0_ |= 0x0004;
+}
+result.skipLock_ = skipLock_;
 result.bitField0_ = to_bitField0_;
 onBuilt();
 return result;
@@ -29378,6 +29432,9 @@ public final class MasterProtos {
   }
   onChanged();
 }
+if (other.hasSkipLock()) {
+  setSkipLock(other.getSkipLock());
+}
 this.mergeUnknownFields(other.getUnknownFields());
 return 

hbase git commit: HBASE-15384 Avoid using '/tmp' directory in TestBulkLoad

2016-03-28 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 6a80087f4 -> c33e2352f


HBASE-15384 Avoid using '/tmp' directory in TestBulkLoad


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c33e2352
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c33e2352
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c33e2352

Branch: refs/heads/branch-1.2
Commit: c33e2352f688e23c665216a6fa8ae99c0d284134
Parents: 6a80087
Author: chenheng <chenh...@apache.org>
Authored: Thu Mar 24 15:19:02 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Tue Mar 29 11:13:34 2016 +0800

--
 .../apache/hadoop/hbase/regionserver/TestBulkLoad.java   | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c33e2352/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
index 092f3ef..735fec8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -81,6 +82,7 @@ public class TestBulkLoad {
 
   @ClassRule
   public static TemporaryFolder testFolder = new TemporaryFolder();
+  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final WAL log = mock(WAL.class);
   private final Configuration conf = HBaseConfiguration.create();
   private final Random random = new Random();
@@ -216,13 +218,18 @@ public class TestBulkLoad {
   }
 
   private Pair<byte[], String> withMissingHFileForFamily(byte[] family) {
-return new Pair<byte[], String>(family, "/tmp/does_not_exist");
+return new Pair<byte[], String>(family, getNotExistFilePath());
+  }
+
+  private String getNotExistFilePath() {
+Path path = new Path(TEST_UTIL.getDataTestDir(), "does_not_exist");
+return path.toUri().getPath();
   }
 
   private Pair<byte[], String> 
withInvalidColumnFamilyButProperHFileLocation(byte[] family)
   throws IOException {
 createHFileForFamilies(family);
-return new Pair<byte[], String>(new byte[]{0x00, 0x01, 0x02}, 
"/tmp/does_not_exist");
+return new Pair<byte[], String>(new byte[]{0x00, 0x01, 0x02}, 
getNotExistFilePath());
   }
 
 



hbase git commit: HBASE-15384 Avoid using '/tmp' directory in TestBulkLoad

2016-03-28 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 fc47cc38b -> 0f72597db


HBASE-15384 Avoid using '/tmp' directory in TestBulkLoad


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0f72597d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0f72597d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0f72597d

Branch: refs/heads/branch-1.3
Commit: 0f72597db161ca8876821a2a939cf052f6f42f5c
Parents: fc47cc3
Author: chenheng <chenh...@apache.org>
Authored: Thu Mar 24 15:19:02 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Tue Mar 29 11:11:37 2016 +0800

--
 .../apache/hadoop/hbase/regionserver/TestBulkLoad.java   | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0f72597d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
index 092f3ef..735fec8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -81,6 +82,7 @@ public class TestBulkLoad {
 
   @ClassRule
   public static TemporaryFolder testFolder = new TemporaryFolder();
+  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final WAL log = mock(WAL.class);
   private final Configuration conf = HBaseConfiguration.create();
   private final Random random = new Random();
@@ -216,13 +218,18 @@ public class TestBulkLoad {
   }
 
   private Pair<byte[], String> withMissingHFileForFamily(byte[] family) {
-return new Pair<byte[], String>(family, "/tmp/does_not_exist");
+return new Pair<byte[], String>(family, getNotExistFilePath());
+  }
+
+  private String getNotExistFilePath() {
+Path path = new Path(TEST_UTIL.getDataTestDir(), "does_not_exist");
+return path.toUri().getPath();
   }
 
   private Pair<byte[], String> 
withInvalidColumnFamilyButProperHFileLocation(byte[] family)
   throws IOException {
 createHFileForFamilies(family);
-return new Pair<byte[], String>(new byte[]{0x00, 0x01, 0x02}, 
"/tmp/does_not_exist");
+return new Pair<byte[], String>(new byte[]{0x00, 0x01, 0x02}, 
getNotExistFilePath());
   }
 
 



hbase git commit: HBASE-15384 Avoid using '/tmp' directory in TestBulkLoad

2016-03-28 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/branch-1 5202d3c25 -> 13df65321


HBASE-15384 Avoid using '/tmp' directory in TestBulkLoad


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/13df6532
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/13df6532
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/13df6532

Branch: refs/heads/branch-1
Commit: 13df65321522831640ec08f209133b5ee7b8a6bf
Parents: 5202d3c
Author: chenheng <chenh...@apache.org>
Authored: Thu Mar 24 15:19:02 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Tue Mar 29 11:10:44 2016 +0800

--
 .../apache/hadoop/hbase/regionserver/TestBulkLoad.java   | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/13df6532/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
index 092f3ef..735fec8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -81,6 +82,7 @@ public class TestBulkLoad {
 
   @ClassRule
   public static TemporaryFolder testFolder = new TemporaryFolder();
+  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final WAL log = mock(WAL.class);
   private final Configuration conf = HBaseConfiguration.create();
   private final Random random = new Random();
@@ -216,13 +218,18 @@ public class TestBulkLoad {
   }
 
   private Pair<byte[], String> withMissingHFileForFamily(byte[] family) {
-return new Pair<byte[], String>(family, "/tmp/does_not_exist");
+return new Pair<byte[], String>(family, getNotExistFilePath());
+  }
+
+  private String getNotExistFilePath() {
+Path path = new Path(TEST_UTIL.getDataTestDir(), "does_not_exist");
+return path.toUri().getPath();
   }
 
   private Pair<byte[], String> 
withInvalidColumnFamilyButProperHFileLocation(byte[] family)
   throws IOException {
 createHFileForFamilies(family);
-return new Pair<byte[], String>(new byte[]{0x00, 0x01, 0x02}, 
"/tmp/does_not_exist");
+return new Pair<byte[], String>(new byte[]{0x00, 0x01, 0x02}, 
getNotExistFilePath());
   }
 
 



[3/3] hbase git commit: HBASE-11393 Replication TableCfs should be a PB object rather than a string

2016-03-28 Thread chenheng
HBASE-11393 Replication TableCfs should be a PB object rather than a string


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7f39baf0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7f39baf0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7f39baf0

Branch: refs/heads/master
Commit: 7f39baf0f4572ff209837d7de5d37554851ecbb7
Parents: 0520097
Author: chenheng <chenh...@apache.org>
Authored: Fri Mar 25 14:16:47 2016 +0800
Committer: chenheng <chenh...@apache.org>
Committed: Tue Mar 29 10:25:29 2016 +0800

--
 .../client/replication/ReplicationAdmin.java|  170 +--
 .../replication/ReplicationSerDeHelper.java |  315 +
 .../hbase/replication/ReplicationPeer.java  |1 +
 .../replication/ReplicationPeerConfig.java  |   16 +-
 .../replication/ReplicationPeerZKImpl.java  |   76 +-
 .../hbase/replication/ReplicationPeers.java |   19 +-
 .../replication/ReplicationPeersZKImpl.java |  163 +--
 .../replication/ReplicationStateZKBase.java |   19 +
 .../protobuf/generated/ZooKeeperProtos.java | 1155 +-
 .../src/main/protobuf/ZooKeeper.proto   |6 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   10 +-
 .../replication/master/TableCFsUpdater.java |  122 ++
 .../hbase/client/TestReplicaWithCluster.java|6 +-
 .../replication/TestReplicationAdmin.java   |  195 +--
 .../cleaner/TestReplicationHFileCleaner.java|2 +-
 .../replication/TestMasterReplication.java  |   11 +-
 .../replication/TestMultiSlaveReplication.java  |   10 +-
 .../replication/TestPerTableCFReplication.java  |  158 ++-
 .../hbase/replication/TestReplicationBase.java  |4 +-
 .../replication/TestReplicationSmallTests.java  |5 +-
 .../replication/TestReplicationStateBasic.java  |   14 +-
 .../replication/TestReplicationSyncUpTool.java  |4 +-
 .../TestReplicationTrackerZKImpl.java   |   10 +-
 .../replication/TestReplicationWithTags.java|6 +-
 .../replication/master/TestTableCFsUpdater.java |  210 
 ...sibilityLabelReplicationWithExpAsString.java |9 +-
 .../TestVisibilityLabelsReplication.java|5 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |5 +-
 .../src/main/ruby/hbase/replication_admin.rb|   44 +-
 .../src/main/ruby/shell/commands/add_peer.rb|4 +-
 .../ruby/shell/commands/append_peer_tableCFs.rb |2 +-
 .../ruby/shell/commands/remove_peer_tableCFs.rb |4 +-
 .../ruby/shell/commands/set_peer_tableCFs.rb|5 +-
 33 files changed, 2309 insertions(+), 476 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7f39baf0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index dcf1957..8ee3a22 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -32,7 +32,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -48,6 +47,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
@@ -166,27 +166,6 @@ public class ReplicationAdmin implements Closeable {
   }
 
   /**
-   * Add a new peer cluster to replicate to.
-   * @param id a short name that identifies the cluster
-   * @param clusterKey the concatenation of the slave cluster's
-   * 
hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
-   * @throws IllegalStateException if there's already one slave since
-   * multi-slave isn't supported yet.
-   * @deprecated Use addPeer(String, ReplicationPeerConfig, Map) instead.
-   */
-  @Deprecated
-  public void addPeer(String id, String clusterKey) throws 
ReplicationException {
-this.addPeer(id, new ReplicationPeerConfig().setClusterKey(clusterKey), 
null);
-  }
-
-  @Deprecated
-  public void addPeer(String id, String clusterKey, String tableCFs)

[1/3] hbase git commit: HBASE-11393 Replication TableCfs should be a PB object rather than a string

2016-03-28 Thread chenheng
Repository: hbase
Updated Branches:
  refs/heads/master 052009761 -> 7f39baf0f


http://git-wip-us.apache.org/repos/asf/hbase/blob/7f39baf0/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
index 29a052b..8b7c0a5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
@@ -19,13 +19,9 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -47,7 +43,9 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -58,6 +56,8 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import static org.junit.Assert.*;
+
 @Category({FlakeyTests.class, LargeTests.class})
 public class TestPerTableCFReplication {
 
@@ -184,13 +184,13 @@ public class TestPerTableCFReplication {
 Map tabCFsMap = null;
 
 // 1. null or empty string, result should be null
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig(null);
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig(null);
 assertEquals(null, tabCFsMap);
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("");
 assertEquals(null, tabCFsMap);
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("   ");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("   ");
 assertEquals(null, tabCFsMap);
 
 TableName tab1 = TableName.valueOf("tab1");
@@ -198,20 +198,20 @@ public class TestPerTableCFReplication {
 TableName tab3 = TableName.valueOf("tab3");
 
 // 2. single table: "tab1" / "tab2:cf1" / "tab3:cf1,cf3"
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab1");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab1");
 assertEquals(1, tabCFsMap.size()); // only one table
 assertTrue(tabCFsMap.containsKey(tab1));   // its table name is "tab1"
 assertFalse(tabCFsMap.containsKey(tab2));  // not other table
 assertEquals(null, tabCFsMap.get(tab1));   // null cf-list,
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab2:cf1");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab2:cf1");
 assertEquals(1, tabCFsMap.size()); // only one table
 assertTrue(tabCFsMap.containsKey(tab2));   // its table name is "tab2"
 assertFalse(tabCFsMap.containsKey(tab1));  // not other table
 assertEquals(1, tabCFsMap.get(tab2).size());   // cf-list contains only 1 
cf
 assertEquals("cf1", tabCFsMap.get(tab2).get(0));// the only cf is "cf1"
 
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab3 : cf1 , cf3");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab3 : cf1 , 
cf3");
 assertEquals(1, tabCFsMap.size()); // only one table
 assertTrue(tabCFsMap.containsKey(tab3));   // its table name is "tab2"
 assertFalse(tabCFsMap.containsKey(tab1));  // not other table
@@ -220,7 +220,7 @@ public class TestPerTableCFReplication {
 assertTrue(tabCFsMap.get(tab3).contains("cf3"));// contains "cf3"
 
 // 3. multiple tables: "tab1 ; tab2:cf1 ; tab3:cf1,cf3"
-tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab1 ; tab2:cf1 ; 
tab3:cf1,cf3");
+tabCFsMap = ReplicationSerDeHelper.parseTableCFsFromConfig("tab1 ; 
tab2:cf1 ; tab3:cf1,cf3");
 // 3.1 contains 3 tables : "tab1", "tab2" and "tab3"
 assertEquals(3, tabCFsMap.size());
 assertTrue(tabCFsMap.containsKey(tab1));
@@ -238,7 +238,7 @@ public class TestPerTableCFReplication {
 
 // 4. contiguous or additional ";"(table delimiter) or ","(cf delimiter) 
can be tolerated
 // still use the example of multiple tables: "tab1 ; tab2:cf1 ; 

  1   2   >