hbase git commit: HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of Mutation and Query

2017-08-11 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b5d4a0aa4 -> 8cebf7f1a


HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of 
Mutation and Query

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8cebf7f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8cebf7f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8cebf7f1

Branch: refs/heads/branch-2
Commit: 8cebf7f1a818a419ef32a48a24c3b5df27dd980d
Parents: b5d4a0a
Author: Xiang Li 
Authored: Fri Aug 11 00:07:11 2017 +0800
Committer: Jerry He 
Committed: Fri Aug 11 22:53:37 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/client/Append.java  |  2 +-
 .../main/java/org/apache/hadoop/hbase/client/Delete.java  | 10 +-
 .../src/main/java/org/apache/hadoop/hbase/client/Get.java |  2 +-
 .../java/org/apache/hadoop/hbase/client/Increment.java|  2 --
 .../java/org/apache/hadoop/hbase/client/Mutation.java |  1 +
 .../src/main/java/org/apache/hadoop/hbase/client/Put.java |  7 ---
 .../main/java/org/apache/hadoop/hbase/client/Scan.java|  2 +-
 7 files changed, 9 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8cebf7f1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 02ec770..2bd0860 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -137,10 +137,10 @@ public class Append extends Mutation {
 List list = this.familyMap.get(family);
 if (list == null) {
   list  = new ArrayList<>(1);
+  this.familyMap.put(family, list);
 }
 // find where the new entry should be placed in the List
 list.add(cell);
-this.familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cebf7f1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 395c277..bf5241c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -183,9 +183,9 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -219,12 +219,12 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 } else if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -239,10 +239,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
-familyMap.put(family, list);
 return this;
   }
 
@@ -272,10 +272,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
-familyMap.put(family, list);
 return this;
   }
 
@@ -307,10 +307,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, 
KeyValue.Type.Delete);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cebf7f1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff 

hbase git commit: HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of Mutation and Query

2017-08-11 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 95e883967 -> 173dce734


HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of 
Mutation and Query

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/173dce73
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/173dce73
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/173dce73

Branch: refs/heads/master
Commit: 173dce73471da005fb6780a7e7b65b43bad481e2
Parents: 95e8839
Author: Xiang Li 
Authored: Fri Aug 11 00:07:11 2017 +0800
Committer: Jerry He 
Committed: Fri Aug 11 22:49:38 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/client/Append.java  |  2 +-
 .../main/java/org/apache/hadoop/hbase/client/Delete.java  | 10 +-
 .../src/main/java/org/apache/hadoop/hbase/client/Get.java |  2 +-
 .../java/org/apache/hadoop/hbase/client/Increment.java|  2 --
 .../java/org/apache/hadoop/hbase/client/Mutation.java |  1 +
 .../src/main/java/org/apache/hadoop/hbase/client/Put.java |  7 ---
 .../main/java/org/apache/hadoop/hbase/client/Scan.java|  2 +-
 7 files changed, 9 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/173dce73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 02ec770..2bd0860 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -137,10 +137,10 @@ public class Append extends Mutation {
 List list = this.familyMap.get(family);
 if (list == null) {
   list  = new ArrayList<>(1);
+  this.familyMap.put(family, list);
 }
 // find where the new entry should be placed in the List
 list.add(cell);
-this.familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/173dce73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 395c277..bf5241c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -183,9 +183,9 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -219,12 +219,12 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 } else if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -239,10 +239,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
-familyMap.put(family, list);
 return this;
   }
 
@@ -272,10 +272,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
-familyMap.put(family, list);
 return this;
   }
 
@@ -307,10 +307,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, 
KeyValue.Type.Delete);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/173dce73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git 

hbase git commit: HBASE-18557: Change splitable to mergeable in MergeTableRegionsProcedure

2017-08-11 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master aa8f67a14 -> 95e883967


HBASE-18557: Change splitable to mergeable in MergeTableRegionsProcedure

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/95e88396
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/95e88396
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/95e88396

Branch: refs/heads/master
Commit: 95e883967cbb383b48d8fae548fb55b88c7f0529
Parents: aa8f67a
Author: Yi Liang 
Authored: Thu Aug 10 11:15:59 2017 -0700
Committer: Jerry He 
Committed: Fri Aug 11 22:45:22 2017 -0700

--
 .../hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/95e88396/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 74d9b75..9aaf297 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -513,7 +513,7 @@ public class MergeTableRegionsProcedure
   throws IOException {
 GetRegionInfoResponse response =
   Util.getRegionInfoResponse(env, rs.getServerName(), rs.getRegion());
-return response.hasSplittable() && response.getSplittable();
+return response.hasMergeable() && response.getMergeable();
   }
 
   /**



hbase git commit: HBASE-18576. [C++] Add ping for RPC test

2017-08-11 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14850 e2a1cad3e -> aff0336ec


HBASE-18576. [C++] Add ping for RPC test

Signed-off-by: Enis Soztutar 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aff0336e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aff0336e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aff0336e

Branch: refs/heads/HBASE-14850
Commit: aff0336ec339d6b8b61d6bfbdff4df68ed7668e9
Parents: e2a1cad
Author: Xiaobing Zhou 
Authored: Fri Aug 11 15:02:58 2017 -0700
Committer: Enis Soztutar 
Committed: Fri Aug 11 16:38:26 2017 -0700

--
 .../connection/rpc-test-server.cc   |  3 ++
 hbase-native-client/connection/rpc-test.cc  | 30 
 2 files changed, 33 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aff0336e/hbase-native-client/connection/rpc-test-server.cc
--
diff --git a/hbase-native-client/connection/rpc-test-server.cc 
b/hbase-native-client/connection/rpc-test-server.cc
index f350d6a..6132fbb 100644
--- a/hbase-native-client/connection/rpc-test-server.cc
+++ b/hbase-native-client/connection/rpc-test-server.cc
@@ -68,6 +68,9 @@ Future 
RpcTestService::operator()(std::unique_ptr();
 response->set_resp_msg(pb_resp_msg);
+VLOG(1) << "RPC server:"
+<< " ping called.";
+
   } else if (method_name == "echo") {
 auto pb_resp_msg = std::make_shared();
 /* get msg from client */

http://git-wip-us.apache.org/repos/asf/hbase/blob/aff0336e/hbase-native-client/connection/rpc-test.cc
--
diff --git a/hbase-native-client/connection/rpc-test.cc 
b/hbase-native-client/connection/rpc-test.cc
index e7f678d..4688950 100644
--- a/hbase-native-client/connection/rpc-test.cc
+++ b/hbase-native-client/connection/rpc-test.cc
@@ -88,6 +88,36 @@ std::shared_ptr 
CreateRpcClient(std::shared_ptr conf,
 }
 
 /**
+* test ping
+*/
+TEST_F(RpcTest, Ping) {
+  auto conf = CreateConf();
+  auto server = CreateRpcServer();
+  auto server_addr = GetRpcServerAddress(server);
+  auto client = CreateRpcClient(conf);
+
+  auto method = "ping";
+  auto request = 
std::make_unique(std::make_shared(),
+   
std::make_shared(), method);
+
+  /* sending out request */
+  client
+  ->AsyncCall(server_addr->getAddressStr(), server_addr->getPort(), 
std::move(request),
+  hbase::security::User::defaultUser())
+  .then([&](std::unique_ptr response) {
+auto pb_resp = 
std::static_pointer_cast(response->resp_msg());
+EXPECT_TRUE(pb_resp != nullptr);
+VLOG(1) << folly::sformat(FLAGS_result_format, method, "");
+  })
+  .onError([&](const folly::exception_wrapper& ew) {
+FAIL() << folly::sformat(FLAGS_fail_format, method);
+  });
+
+  server->stop();
+  server->join();
+}
+
+/**
  * test echo
  */
 TEST_F(RpcTest, Echo) {



hbase git commit: HBASE-18526 FIFOCompactionPolicy pre-check uses wrong scope (Vladimir Rodionov)

2017-08-11 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 7197b40cb -> b5d4a0aa4


HBASE-18526 FIFOCompactionPolicy pre-check uses wrong scope (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b5d4a0aa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b5d4a0aa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b5d4a0aa

Branch: refs/heads/branch-2
Commit: b5d4a0aa412c5cd086d03c99f35c9bf66958cc79
Parents: 7197b40
Author: tedyu 
Authored: Fri Aug 11 16:42:29 2017 -0700
Committer: tedyu 
Committed: Fri Aug 11 16:42:29 2017 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java   | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b5d4a0aa/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 96bf859..421ae8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -53,7 +53,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -79,6 +78,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.BypassCoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -164,6 +164,9 @@ import 
org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.UserProvider;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
@@ -200,9 +203,6 @@ import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.servlet.ServletHolder;
 import org.eclipse.jetty.webapp.WebAppContext;
 
-import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Service;
 
@@ -1937,14 +1937,14 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   }
 
   // 3. blocking file count
-  String sbfc = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
-  if (sbfc != null) {
-blockingFileCount = Integer.parseInt(sbfc);
+  sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
+  if (sv != null) {
+blockingFileCount = Integer.parseInt(sv);
   }
   if (blockingFileCount < 1000) {
 message =
-"blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + 
blockingFileCount
-+ " is below recommended minimum of 1000";
+"Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + 
blockingFileCount
++ " is below recommended minimum of 1000 for column family "+ 
hcd.getNameAsString();
 throw new IOException(message);
   }
 }



hbase git commit: HBASE-18526 FIFOCompactionPolicy pre-check uses wrong scope (Vladimir Rodionov)

2017-08-11 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 1070888ff -> aa8f67a14


HBASE-18526 FIFOCompactionPolicy pre-check uses wrong scope (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aa8f67a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aa8f67a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aa8f67a1

Branch: refs/heads/master
Commit: aa8f67a148cbefbfc4bfdc25b2dc48c7ed947212
Parents: 1070888
Author: tedyu 
Authored: Fri Aug 11 16:41:40 2017 -0700
Committer: tedyu 
Committed: Fri Aug 11 16:41:40 2017 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java   | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aa8f67a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 96bf859..421ae8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -53,7 +53,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -79,6 +78,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.BypassCoprocessorException;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -164,6 +164,9 @@ import 
org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.UserProvider;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
@@ -200,9 +203,6 @@ import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.servlet.ServletHolder;
 import org.eclipse.jetty.webapp.WebAppContext;
 
-import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Service;
 
@@ -1937,14 +1937,14 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   }
 
   // 3. blocking file count
-  String sbfc = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
-  if (sbfc != null) {
-blockingFileCount = Integer.parseInt(sbfc);
+  sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
+  if (sv != null) {
+blockingFileCount = Integer.parseInt(sv);
   }
   if (blockingFileCount < 1000) {
 message =
-"blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + 
blockingFileCount
-+ " is below recommended minimum of 1000";
+"Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + 
blockingFileCount
++ " is below recommended minimum of 1000 for column family "+ 
hcd.getNameAsString();
 throw new IOException(message);
   }
 }



hbase git commit: HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers; AMENDMENT -- disable TestAM#testSocketTimeout... mock is insufficient for new processing

2017-08-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b5a4e07c6 -> 7197b40cb


HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers; AMENDMENT -- 
disable TestAM#testSocketTimeout... mock is insufficient for new processing


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7197b40c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7197b40c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7197b40c

Branch: refs/heads/branch-2
Commit: 7197b40cbfe0599fa792b8152ed94761377e75e3
Parents: b5a4e07
Author: Michael Stack 
Authored: Fri Aug 11 14:20:06 2017 -0700
Committer: Michael Stack 
Committed: Fri Aug 11 14:21:07 2017 -0700

--
 .../hadoop/hbase/master/assignment/TestAssignmentManager.java | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7197b40c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
index d18c12a..4d2a894 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
@@ -243,7 +243,7 @@ public class TestAssignmentManager {
 }
   }
 
-  @Test
+  @Ignore @Test // Disabled for now. Since HBASE-18551, this mock is 
insufficient.
   public void testSocketTimeout() throws Exception {
 final TableName tableName = TableName.valueOf(this.name.getMethodName());
 final HRegionInfo hri = createRegionInfo(tableName, 1);
@@ -254,9 +254,8 @@ public class TestAssignmentManager {
 rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 3));
 waitOnFuture(submitProcedure(am.createAssignProcedure(hri, false)));
 
-rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 3));
-
-exception.expect(ServerCrashException.class);
+rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 1));
+// exception.expect(ServerCrashException.class);
 waitOnFuture(submitProcedure(am.createUnassignProcedure(hri, null, 
false)));
 
 assertEquals(assignSubmittedCount + 1, 
assignProcMetrics.getSubmittedCounter().getCount());



hbase git commit: HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers; AMENDMENT -- disable TestAM#testSocketTimeout... mock is insufficient for new processing

2017-08-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 71a9a9a94 -> 1070888ff


HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers; AMENDMENT -- 
disable TestAM#testSocketTimeout... mock is insufficient for new processing


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1070888f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1070888f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1070888f

Branch: refs/heads/master
Commit: 1070888fff3a89d435018f11bfb2fd5609be8bab
Parents: 71a9a9a
Author: Michael Stack 
Authored: Fri Aug 11 14:20:06 2017 -0700
Committer: Michael Stack 
Committed: Fri Aug 11 14:20:35 2017 -0700

--
 .../hadoop/hbase/master/assignment/TestAssignmentManager.java | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1070888f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
index d18c12a..4d2a894 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
@@ -243,7 +243,7 @@ public class TestAssignmentManager {
 }
   }
 
-  @Test
+  @Ignore @Test // Disabled for now. Since HBASE-18551, this mock is 
insufficient.
   public void testSocketTimeout() throws Exception {
 final TableName tableName = TableName.valueOf(this.name.getMethodName());
 final HRegionInfo hri = createRegionInfo(tableName, 1);
@@ -254,9 +254,8 @@ public class TestAssignmentManager {
 rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 3));
 waitOnFuture(submitProcedure(am.createAssignProcedure(hri, false)));
 
-rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 3));
-
-exception.expect(ServerCrashException.class);
+rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 1));
+// exception.expect(ServerCrashException.class);
 waitOnFuture(submitProcedure(am.createUnassignProcedure(hri, null, 
false)));
 
 assertEquals(assignSubmittedCount + 1, 
assignProcMetrics.getSubmittedCounter().getCount());



hbase git commit: HBASE-18557: Change splitable to mergeable in MergeTableRegionsProcedure

2017-08-11 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b627cfad3 -> b5a4e07c6


HBASE-18557: Change splitable to mergeable in MergeTableRegionsProcedure

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b5a4e07c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b5a4e07c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b5a4e07c

Branch: refs/heads/branch-2
Commit: b5a4e07c6a521b13146daccb7a9d502317fde427
Parents: b627cfa
Author: Yi Liang 
Authored: Thu Aug 10 11:15:59 2017 -0700
Committer: Jerry He 
Committed: Fri Aug 11 13:29:13 2017 -0700

--
 .../hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b5a4e07c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 74d9b75..9aaf297 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -513,7 +513,7 @@ public class MergeTableRegionsProcedure
   throws IOException {
 GetRegionInfoResponse response =
   Util.getRegionInfoResponse(env, rs.getServerName(), rs.getRegion());
-return response.hasSplittable() && response.getSplittable();
+return response.hasMergeable() && response.getMergeable();
   }
 
   /**



[2/6] hbase git commit: HBASE-18479 should apply HBASE-18255 to HBASE_MASTER_OPTS too

2017-08-11 Thread apurtell
HBASE-18479 should apply HBASE-18255 to HBASE_MASTER_OPTS too

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1ce51032
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1ce51032
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1ce51032

Branch: refs/heads/branch-1.4
Commit: 1ce510322b712f3f5b6c2239a85f6df5dffcac91
Parents: 8ca1bf9
Author: chenyechao 
Authored: Sun Jul 30 14:07:38 2017 +0800
Committer: Andrew Purtell 
Committed: Fri Aug 11 13:08:28 2017 -0700

--
 conf/hbase-env.cmd | 2 +-
 conf/hbase-env.sh  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1ce51032/conf/hbase-env.cmd
--
diff --git a/conf/hbase-env.cmd b/conf/hbase-env.cmd
index 2ffadbf..b10e934 100644
--- a/conf/hbase-env.cmd
+++ b/conf/hbase-env.cmd
@@ -43,7 +43,7 @@
 set HBASE_OPTS="-XX:+UseConcMarkSweepGC" "-Djava.net.preferIPv4Stack=true"
 
 @rem Configure PermSize. Only needed in JDK7. You can safely remove it for 
JDK8+
-set HBASE_MASTER_OPTS=%HBASE_MASTER_OPTS% "-XX:PermSize=128m" 
"-XX:MaxPermSize=128m"
+set HBASE_MASTER_OPTS=%HBASE_MASTER_OPTS% "-XX:PermSize=128m" 
"-XX:MaxPermSize=128m" "-XX:ReservedCodeCacheSize=256m"
 set HBASE_REGIONSERVER_OPTS=%HBASE_REGIONSERVER_OPTS% "-XX:PermSize=128m" 
"-XX:MaxPermSize=128m" "-XX:ReservedCodeCacheSize=256m"
 
 @rem Uncomment below to enable java garbage collection logging for the 
server-side processes

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ce51032/conf/hbase-env.sh
--
diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh
index 599a2f1..f2195da 100644
--- a/conf/hbase-env.sh
+++ b/conf/hbase-env.sh
@@ -43,7 +43,7 @@
 export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
 
 # Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m 
-XX:MaxPermSize=128m"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m 
-XX:MaxPermSize=128m -XX:ReservedCodeCacheSize=256m"
 export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m 
-XX:MaxPermSize=128m -XX:ReservedCodeCacheSize=256m"
 
 # Uncomment one of the below three options to enable java garbage collection 
logging for the server-side processes.



[4/6] hbase git commit: HBASE-18398: Snapshot operation fails with FileNotFoundException

2017-08-11 Thread apurtell
HBASE-18398: Snapshot operation fails with FileNotFoundException


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e894e875
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e894e875
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e894e875

Branch: refs/heads/branch-1.4
Commit: e894e8753ce770a73140a3549d0e90d8701f19b1
Parents: a048e6e
Author: Ashu Pachauri 
Authored: Mon Aug 7 18:10:33 2017 -0700
Committer: Andrew Purtell 
Committed: Fri Aug 11 13:08:28 2017 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  |  29 ++-
 .../hadoop/hbase/regionserver/HStore.java   |  16 ++
 .../hadoop/hbase/regionserver/Region.java   |   9 +-
 .../snapshot/FlushSnapshotSubprocedure.java |  31 ++-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |  22 +-
 .../hbase/snapshot/TestRegionSnapshotTask.java  | 205 +++
 6 files changed, 289 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e894e875/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d6ad5a4..dfb7b71 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -8497,11 +8497,12 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 case DELETE:
 case BATCH_MUTATE:
 case COMPACT_REGION:
-  // when a region is in recovering state, no read, split or merge is 
allowed
+case SNAPSHOT:
+  // when a region is in recovering state, no read, split, merge or 
snapshot is allowed
   if (isRecovering() && (this.disallowWritesInRecovering ||
-  (op != Operation.PUT && op != Operation.DELETE && op != 
Operation.BATCH_MUTATE))) {
+  (op != Operation.PUT && op != Operation.DELETE && op != 
Operation.BATCH_MUTATE))) {
 throw new 
RegionInRecoveryException(getRegionInfo().getRegionNameAsString() +
-  " is recovering; cannot take reads");
+" is recovering; cannot take reads");
   }
   break;
 default:
@@ -8521,6 +8522,15 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   lock.readLock().unlock();
   throw new 
NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is 
closed");
 }
+// The unit for snapshot is a region. So, all stores for this region must 
be
+// prepared for snapshot operation before proceeding.
+if (op == Operation.SNAPSHOT) {
+  for (Store store : stores.values()) {
+if (store instanceof HStore) {
+  ((HStore)store).preSnapshotOperation();
+}
+  }
+}
 try {
   if (coprocessorHost != null) {
 coprocessorHost.postStartRegionOperation(op);
@@ -8536,12 +8546,15 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 closeRegionOperation(Operation.ANY);
   }
 
-  /**
-   * Closes the lock. This needs to be called in the finally block 
corresponding
-   * to the try block of {@link #startRegionOperation(Operation)}
-   * @throws IOException
-   */
+  @Override
   public void closeRegionOperation(Operation operation) throws IOException {
+if (operation == Operation.SNAPSHOT) {
+  for (Store store: stores.values()) {
+if (store instanceof HStore) {
+  ((HStore)store).postSnapshotOperation();
+}
+  }
+}
 lock.readLock().unlock();
 if (coprocessorHost != null) {
   coprocessorHost.postCloseRegionOperation(operation);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e894e875/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index c211736..de95aeb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -2682,6 +2682,22 @@ public class HStore implements Store {
   return getRegionInfo().getReplicaId() == 
HRegionInfo.DEFAULT_REPLICA_ID;
   }
 
+  /**
+   * Sets the store up for a region level snapshot operation.
+   * @see #postSnapshotOperation()
+   */
+  public void preSnapshotOperation() {
+archiveLock.lock();
+  }
+
+  /**
+   * Perform tasks needed 

[6/6] hbase git commit: HBASE-18024 HRegion#initializeRegionInternals should not re-create .hregioninfo file when the region directory no longer exists

2017-08-11 Thread apurtell
HBASE-18024 HRegion#initializeRegionInternals should not re-create .hregioninfo 
file when the region directory no longer exists


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8ca1bf96
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8ca1bf96
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8ca1bf96

Branch: refs/heads/branch-1.4
Commit: 8ca1bf96f4b3ca5ad3ed05e6f5ed2a2ad30d8bbb
Parents: e894e87
Author: Esteban Gutierrez 
Authored: Fri Jul 21 13:13:00 2017 -0500
Committer: Andrew Purtell 
Committed: Fri Aug 11 13:08:28 2017 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  | 11 +++-
 .../hbase/regionserver/HRegionFileSystem.java   | 31 +--
 .../hadoop/hbase/regionserver/TestHRegion.java  |  7 ++-
 .../hbase/regionserver/TestRegionOpen.java  | 56 +++-
 .../TestStoreFileRefresherChore.java|  2 +
 5 files changed, 99 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8ca1bf96/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index dfb7b71..1fac683 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -902,8 +902,15 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 }
 
 // Write HRI to a file in case we need to recover hbase:meta
-status.setStatus("Writing region info on filesystem");
-fs.checkRegionInfoOnFilesystem();
+// Only the primary replica should write .regioninfo
+if (this.getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) 
{
+  status.setStatus("Writing region info on filesystem");
+  fs.checkRegionInfoOnFilesystem();
+} else {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Skipping creation of .regioninfo file for " + 
this.getRegionInfo());
+  }
+}
 
 // Initialize all the HStores
 status.setStatus("Initializing all the Stores");

http://git-wip-us.apache.org/repos/asf/hbase/blob/8ca1bf96/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 33c03ca..3a0b30a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -858,9 +858,19 @@ public class HRegionFileSystem {
 // only should be sufficient. I don't want to read the file every time to 
check if it pb
 // serialized.
 byte[] content = getRegionInfoFileContent(regionInfoForFs);
+
+// Verify if the region directory exists before opening a region. We need 
to do this since if
+// the region directory doesn't exist we will re-create the region 
directory and a new HRI
+// when HRegion.openHRegion() is called.
 try {
-  Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
+  FileStatus status = fs.getFileStatus(getRegionDir());
+} catch (FileNotFoundException e) {
+  LOG.warn(getRegionDir() + " doesn't exist for region: " + 
regionInfoForFs.getEncodedName() +
+  " on table " + regionInfo.getTable());
+}
 
+try {
+  Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
   FileStatus status = fs.getFileStatus(regionInfoFile);
   if (status != null && status.getLen() == content.length) {
 // Then assume the content good and move on.
@@ -953,7 +963,13 @@ public class HRegionFileSystem {
 }
 
 // Write HRI to a file in case we need to recover hbase:meta
-regionFs.writeRegionInfoOnFilesystem(false);
+// Only primary replicas should write region info
+if (regionInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+  regionFs.writeRegionInfoOnFilesystem(false);
+} else {
+  if (LOG.isDebugEnabled())
+LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
+}
 return regionFs;
   }
 
@@ -983,8 +999,15 @@ public class HRegionFileSystem {
   regionFs.cleanupSplitsDir();
   regionFs.cleanupMergesDir();
 
-  // if it doesn't exists, Write HRI to a file, in case we need to recover 
hbase:meta
-  

[3/6] hbase git commit: HBASE-18248 Warn if monitored RPC task has been tied up beyond a configurable threshold

2017-08-11 Thread apurtell
HBASE-18248 Warn if monitored RPC task has been tied up beyond a configurable 
threshold


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a048e6ed
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a048e6ed
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a048e6ed

Branch: refs/heads/branch-1.4
Commit: a048e6ed53b19573499a9ef81a531ed2e8f6b0dc
Parents: 8d826b8
Author: Andrew Purtell 
Authored: Wed Aug 9 18:11:28 2017 -0700
Committer: Andrew Purtell 
Committed: Fri Aug 11 13:08:28 2017 -0700

--
 .../monitoring/MonitoredRPCHandlerImpl.java |  8 +-
 .../hadoop/hbase/monitoring/MonitoredTask.java  |  2 +
 .../hbase/monitoring/MonitoredTaskImpl.java | 16 +++-
 .../hadoop/hbase/monitoring/TaskMonitor.java| 88 +---
 .../hbase/monitoring/TestTaskMonitor.java   | 44 +++---
 5 files changed, 130 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a048e6ed/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
index a29595b..08c8c9f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredRPCHandlerImpl.java
@@ -251,6 +251,12 @@ public class MonitoredRPCHandlerImpl extends 
MonitoredTaskImpl
 if (getState() != State.RUNNING) {
   return super.toString();
 }
-return super.toString() + ", rpcMethod=" + getRPC();
+return super.toString()
++ ", queuetimems=" + getRPCQueueTime()
++ ", starttimems=" + getRPCStartTime()
++ ", clientaddress=" + clientAddress
++ ", remoteport=" + remotePort
++ ", packetlength=" + getRPCPacketLength()
++ ", rpcMethod=" + getRPC();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a048e6ed/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
index ff3667b..48fba1b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTask.java
@@ -39,6 +39,7 @@ public interface MonitoredTask extends Cloneable {
   State getState();
   long getStateTime();
   long getCompletionTimestamp();
+  long getWarnTime();
 
   void markComplete(String msg);
   void pause(String msg);
@@ -48,6 +49,7 @@ public interface MonitoredTask extends Cloneable {
 
   void setStatus(String status);
   void setDescription(String description);
+  void setWarnTime(final long t);
 
   /**
* Explicitly mark this status as able to be cleaned up,

http://git-wip-us.apache.org/repos/asf/hbase/blob/a048e6ed/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
index 27aaceb..0cee4c8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java
@@ -30,7 +30,8 @@ class MonitoredTaskImpl implements MonitoredTask {
   private long startTime;
   private long statusTime;
   private long stateTime;
-  
+  private long warnTime;
+
   private volatile String status;
   private volatile String description;
   
@@ -42,6 +43,7 @@ class MonitoredTaskImpl implements MonitoredTask {
 startTime = System.currentTimeMillis();
 statusTime = startTime;
 stateTime = startTime;
+warnTime = startTime;
   }
 
   @Override
@@ -82,7 +84,12 @@ class MonitoredTaskImpl implements MonitoredTask {
   public long getStateTime() {
 return stateTime;
   }
-  
+
+  @Override
+  public long getWarnTime() {
+return warnTime;
+  }
+
   @Override
   public long getCompletionTimestamp() {
 if (state == State.COMPLETE || state == State.ABORTED) {
@@ -132,6 +139,11 @@ class MonitoredTaskImpl implements MonitoredTask {
   }
 
   @Override
+  public void setWarnTime(long t) {
+this.warnTime = t;
+  }
+
+  @Override
   

[1/6] hbase git commit: HBASE-18025 CatalogJanitor should collect outdated RegionStates from the AM [Forced Update!]

2017-08-11 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 b5ae0ffca -> aaece0ba5 (forced update)


HBASE-18025 CatalogJanitor should collect outdated RegionStates from the AM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aaece0ba
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aaece0ba
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aaece0ba

Branch: refs/heads/branch-1.4
Commit: aaece0ba5e399f248c8255fe509cdb1a862bf299
Parents: 3b9c58b
Author: Esteban Gutierrez 
Authored: Fri Aug 11 12:56:20 2017 -0500
Committer: Andrew Purtell 
Committed: Fri Aug 11 13:08:28 2017 -0700

--
 .../hadoop/hbase/master/CatalogJanitor.java |   4 +
 .../hadoop/hbase/master/RegionStates.java   |   6 +
 .../hadoop/hbase/master/ServerManager.java  |   7 +
 .../TestCatalogJanitorInMemoryStates.java   | 188 +++
 4 files changed, 205 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aaece0ba/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 17644eb..00dc4a5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -217,6 +217,8 @@ public class CatalogJanitor extends ScheduledChore {
   HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, 
regionA);
   HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, 
regionB);
   MetaTableAccessor.deleteMergeQualifiers(services.getConnection(), 
mergedRegion);
+  services.getAssignmentManager().getRegionStates().deleteRegion(regionA);
+  services.getAssignmentManager().getRegionStates().deleteRegion(regionB);
   services.getServerManager().removeRegion(regionA);
   services.getServerManager().removeRegion(regionB);
   return true;
@@ -361,6 +363,8 @@ public class CatalogJanitor extends ScheduledChore {
   if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + 
parent);
   HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, 
parent);
   MetaTableAccessor.deleteRegion(this.connection, parent);
+  if (services.getAssignmentManager().getRegionStates() != null)
+services.getAssignmentManager().getRegionStates().deleteRegion(parent);
   services.getServerManager().removeRegion(parent);
   result = true;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/aaece0ba/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 082b5cc..599e649 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -899,6 +899,12 @@ public class RegionStates {
 }
   }
 
+  @VisibleForTesting
+  public boolean isRegionInRegionStates(final HRegionInfo hri) {
+return (getRegionState(hri) != null || isRegionOnline(hri)) || 
isRegionInTransition(hri)
+|| isRegionInState(hri, State.OFFLINE, State.CLOSED);
+ }
+
   /**
* Checking if a region was assigned to a server which is not online now.
* If so, we should hold re-assign this region till SSH has split its wals.

http://git-wip-us.apache.org/repos/asf/hbase/blob/aaece0ba/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 93e532b..040342f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -1311,6 +1311,13 @@ public class ServerManager {
 flushedSequenceIdByRegion.remove(encodedName);
   }
 
+  @VisibleForTesting
+  public boolean isRegionInServerManagerStates(final HRegionInfo hri) {
+final byte[] encodedName = hri.getEncodedNameAsBytes();
+return (storeFlushedSequenceIdsByRegion.containsKey(encodedName)
+|| flushedSequenceIdByRegion.containsKey(encodedName));
+  }
+
   /**
* Called by delete table and similar to notify the 

[5/6] hbase git commit: HBASE-18197 Avoided to call job.waitForCompletion(true) two times

2017-08-11 Thread apurtell
HBASE-18197 Avoided to call job.waitForCompletion(true) two times

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b9c58be
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b9c58be
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b9c58be

Branch: refs/heads/branch-1.4
Commit: 3b9c58be46e6eec4993650b27e667e5cda11742a
Parents: 1ce5103
Author: Jan Hentschel 
Authored: Sat Jun 10 22:17:00 2017 +0200
Committer: Andrew Purtell 
Committed: Fri Aug 11 13:08:28 2017 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b9c58be/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index d315b81..8a80d15 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -750,6 +750,6 @@ public class Import {
   }
 }
 
-System.exit(job.waitForCompletion(true) ? 0 : 1);
+System.exit(isJobSuccessful ? 0 : 1);
   }
 }



hbase git commit: HBASE-18025 CatalogJanitor should collect outdated RegionStates from the AM

2017-08-11 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 226d8026b -> b5ae0ffca


HBASE-18025 CatalogJanitor should collect outdated RegionStates from the AM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b5ae0ffc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b5ae0ffc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b5ae0ffc

Branch: refs/heads/branch-1.4
Commit: b5ae0ffca593677c0bbf1916bd6b377d4a703dbd
Parents: 226d802
Author: Esteban Gutierrez 
Authored: Fri Aug 11 12:56:20 2017 -0500
Committer: Andrew Purtell 
Committed: Fri Aug 11 13:06:31 2017 -0700

--
 .../hadoop/hbase/master/CatalogJanitor.java |   4 +
 .../hadoop/hbase/master/RegionStates.java   |   6 +
 .../hadoop/hbase/master/ServerManager.java  |   7 +
 .../TestCatalogJanitorInMemoryStates.java   | 188 +++
 4 files changed, 205 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b5ae0ffc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 17644eb..00dc4a5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -217,6 +217,8 @@ public class CatalogJanitor extends ScheduledChore {
   HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, 
regionA);
   HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, 
regionB);
   MetaTableAccessor.deleteMergeQualifiers(services.getConnection(), 
mergedRegion);
+  services.getAssignmentManager().getRegionStates().deleteRegion(regionA);
+  services.getAssignmentManager().getRegionStates().deleteRegion(regionB);
   services.getServerManager().removeRegion(regionA);
   services.getServerManager().removeRegion(regionB);
   return true;
@@ -361,6 +363,8 @@ public class CatalogJanitor extends ScheduledChore {
   if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + 
parent);
   HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, 
parent);
   MetaTableAccessor.deleteRegion(this.connection, parent);
+  if (services.getAssignmentManager().getRegionStates() != null)
+services.getAssignmentManager().getRegionStates().deleteRegion(parent);
   services.getServerManager().removeRegion(parent);
   result = true;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5ae0ffc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 082b5cc..599e649 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -899,6 +899,12 @@ public class RegionStates {
 }
   }
 
+  @VisibleForTesting
+  public boolean isRegionInRegionStates(final HRegionInfo hri) {
+return (getRegionState(hri) != null || isRegionOnline(hri)) || 
isRegionInTransition(hri)
+|| isRegionInState(hri, State.OFFLINE, State.CLOSED);
+ }
+
   /**
* Checking if a region was assigned to a server which is not online now.
* If so, we should hold re-assign this region till SSH has split its wals.

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5ae0ffc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 93e532b..040342f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -1311,6 +1311,13 @@ public class ServerManager {
 flushedSequenceIdByRegion.remove(encodedName);
   }
 
+  @VisibleForTesting
+  public boolean isRegionInServerManagerStates(final HRegionInfo hri) {
+final byte[] encodedName = hri.getEncodedNameAsBytes();
+return (storeFlushedSequenceIdsByRegion.containsKey(encodedName)
+|| flushedSequenceIdByRegion.containsKey(encodedName));
+  }
+
   /**
* Called by delete table and similar to notify the ServerManager that a 

hbase git commit: HBASE-18025 CatalogJanitor should collect outdated RegionStates from the AM

2017-08-11 Thread esteban
Repository: hbase
Updated Branches:
  refs/heads/branch-2 5940f4224 -> b627cfad3


HBASE-18025 CatalogJanitor should collect outdated RegionStates from the AM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b627cfad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b627cfad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b627cfad

Branch: refs/heads/branch-2
Commit: b627cfad35bb7d925506d043f62ff69b0d57869d
Parents: 5940f42
Author: Esteban Gutierrez 
Authored: Fri Jul 21 14:13:13 2017 -0500
Committer: Esteban Gutierrez 
Committed: Fri Aug 11 14:42:22 2017 -0500

--
 .../hadoop/hbase/master/CatalogJanitor.java |  13 +-
 .../hadoop/hbase/master/ServerManager.java  |   7 +
 .../hbase/master/assignment/RegionStates.java   |   6 +
 .../TestCatalogJanitorInMemoryStates.java   | 185 +++
 4 files changed, 209 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b627cfad/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index ba92c76..8daa7db 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -221,6 +221,11 @@ public class CatalogJanitor extends ScheduledChore {
   ProcedureExecutor pe = 
this.services.getMasterProcedureExecutor();
   pe.submitProcedure(new GCMergedRegionsProcedure(pe.getEnvironment(),
   mergedRegion, regionA, regionB));
+  // Remove from in-memory states
+  
this.services.getAssignmentManager().getRegionStates().deleteRegion(regionA);
+  
this.services.getAssignmentManager().getRegionStates().deleteRegion(regionB);
+  this.services.getServerManager().removeRegion(regionA);
+  this.services.getServerManager().removeRegion(regionB);
   return true;
 }
 return false;
@@ -234,6 +239,7 @@ public class CatalogJanitor extends ScheduledChore {
*/
   int scan() throws IOException {
 int result = 0;
+
 try {
   if (!alreadyRunning.compareAndSet(false, true)) {
 LOG.debug("CatalogJanitor already running");
@@ -281,8 +287,8 @@ public class CatalogJanitor extends ScheduledChore {
 }
 
 if (!parentNotCleaned.contains(e.getKey().getEncodedName()) &&
-  cleanParent(e.getKey(), e.getValue())) {
-result++;
+cleanParent(e.getKey(), e.getValue())) {
+  result++;
 } else {
   // We could not clean the parent, so it's daughters should not be
   // cleaned either (HBASE-6160)
@@ -355,6 +361,9 @@ public class CatalogJanitor extends ScheduledChore {
 " -- no longer hold references");
   ProcedureExecutor pe = 
this.services.getMasterProcedureExecutor();
   pe.submitProcedure(new GCRegionProcedure(pe.getEnvironment(), parent));
+  // Remove from in-memory states
+  
this.services.getAssignmentManager().getRegionStates().deleteRegion(parent);
+  this.services.getServerManager().removeRegion(parent);
   return true;
 }
 return false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b627cfad/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index c9c792a..f0e9b88 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -1028,6 +1028,13 @@ public class ServerManager {
 flushedSequenceIdByRegion.remove(encodedName);
   }
 
+  @VisibleForTesting
+  public boolean isRegionInServerManagerStates(final HRegionInfo hri) {
+final byte[] encodedName = hri.getEncodedNameAsBytes();
+return (storeFlushedSequenceIdsByRegion.containsKey(encodedName)
+|| flushedSequenceIdByRegion.containsKey(encodedName));
+  }
+
   /**
* Called by delete table and similar to notify the ServerManager that a 
region was removed.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/b627cfad/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java

hbase git commit: HBASE-18025 CatalogJanitor should collect outdated RegionStates from the AM

2017-08-11 Thread esteban
Repository: hbase
Updated Branches:
  refs/heads/branch-1 b181f172e -> 578e29f96


HBASE-18025 CatalogJanitor should collect outdated RegionStates from the AM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/578e29f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/578e29f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/578e29f9

Branch: refs/heads/branch-1
Commit: 578e29f96b37875cd8092f7c6b3baf2b511148d0
Parents: b181f17
Author: Esteban Gutierrez 
Authored: Fri Aug 11 12:56:20 2017 -0500
Committer: Esteban Gutierrez 
Committed: Fri Aug 11 14:39:16 2017 -0500

--
 .../hadoop/hbase/master/CatalogJanitor.java |   4 +
 .../hadoop/hbase/master/RegionStates.java   |   6 +
 .../hadoop/hbase/master/ServerManager.java  |   7 +
 .../TestCatalogJanitorInMemoryStates.java   | 188 +++
 4 files changed, 205 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/578e29f9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index 17644eb..00dc4a5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -217,6 +217,8 @@ public class CatalogJanitor extends ScheduledChore {
   HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, 
regionA);
   HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, 
regionB);
   MetaTableAccessor.deleteMergeQualifiers(services.getConnection(), 
mergedRegion);
+  services.getAssignmentManager().getRegionStates().deleteRegion(regionA);
+  services.getAssignmentManager().getRegionStates().deleteRegion(regionB);
   services.getServerManager().removeRegion(regionA);
   services.getServerManager().removeRegion(regionB);
   return true;
@@ -361,6 +363,8 @@ public class CatalogJanitor extends ScheduledChore {
   if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + 
parent);
   HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, 
parent);
   MetaTableAccessor.deleteRegion(this.connection, parent);
+  if (services.getAssignmentManager().getRegionStates() != null)
+services.getAssignmentManager().getRegionStates().deleteRegion(parent);
   services.getServerManager().removeRegion(parent);
   result = true;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/578e29f9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 082b5cc..599e649 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -899,6 +899,12 @@ public class RegionStates {
 }
   }
 
+  @VisibleForTesting
+  public boolean isRegionInRegionStates(final HRegionInfo hri) {
+return (getRegionState(hri) != null || isRegionOnline(hri)) || 
isRegionInTransition(hri)
+|| isRegionInState(hri, State.OFFLINE, State.CLOSED);
+ }
+
   /**
* Checking if a region was assigned to a server which is not online now.
* If so, we should hold re-assign this region till SSH has split its wals.

http://git-wip-us.apache.org/repos/asf/hbase/blob/578e29f9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 93e532b..040342f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -1311,6 +1311,13 @@ public class ServerManager {
 flushedSequenceIdByRegion.remove(encodedName);
   }
 
+  @VisibleForTesting
+  public boolean isRegionInServerManagerStates(final HRegionInfo hri) {
+final byte[] encodedName = hri.getEncodedNameAsBytes();
+return (storeFlushedSequenceIdsByRegion.containsKey(encodedName)
+|| flushedSequenceIdByRegion.containsKey(encodedName));
+  }
+
   /**
* Called by delete table and similar to notify the ServerManager that a 
region 

hbase git commit: HBASE-18025 CatalogJanitor should collect outdated RegionStates from the AM

2017-08-11 Thread esteban
Repository: hbase
Updated Branches:
  refs/heads/master 043ec9b37 -> 71a9a9a94


HBASE-18025 CatalogJanitor should collect outdated RegionStates from the AM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/71a9a9a9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/71a9a9a9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/71a9a9a9

Branch: refs/heads/master
Commit: 71a9a9a9440c9f2e2e9dd301dd372197e38e70c5
Parents: 043ec9b
Author: Esteban Gutierrez 
Authored: Fri Jul 21 14:13:13 2017 -0500
Committer: Esteban Gutierrez 
Committed: Fri Aug 11 13:36:38 2017 -0500

--
 .../hadoop/hbase/master/CatalogJanitor.java |  13 +-
 .../hadoop/hbase/master/ServerManager.java  |   7 +
 .../hbase/master/assignment/RegionStates.java   |   6 +
 .../TestCatalogJanitorInMemoryStates.java   | 185 +++
 4 files changed, 209 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/71a9a9a9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index ba92c76..8daa7db 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -221,6 +221,11 @@ public class CatalogJanitor extends ScheduledChore {
   ProcedureExecutor pe = 
this.services.getMasterProcedureExecutor();
   pe.submitProcedure(new GCMergedRegionsProcedure(pe.getEnvironment(),
   mergedRegion, regionA, regionB));
+  // Remove from in-memory states
+  
this.services.getAssignmentManager().getRegionStates().deleteRegion(regionA);
+  
this.services.getAssignmentManager().getRegionStates().deleteRegion(regionB);
+  this.services.getServerManager().removeRegion(regionA);
+  this.services.getServerManager().removeRegion(regionB);
   return true;
 }
 return false;
@@ -234,6 +239,7 @@ public class CatalogJanitor extends ScheduledChore {
*/
   int scan() throws IOException {
 int result = 0;
+
 try {
   if (!alreadyRunning.compareAndSet(false, true)) {
 LOG.debug("CatalogJanitor already running");
@@ -281,8 +287,8 @@ public class CatalogJanitor extends ScheduledChore {
 }
 
 if (!parentNotCleaned.contains(e.getKey().getEncodedName()) &&
-  cleanParent(e.getKey(), e.getValue())) {
-result++;
+cleanParent(e.getKey(), e.getValue())) {
+  result++;
 } else {
   // We could not clean the parent, so it's daughters should not be
   // cleaned either (HBASE-6160)
@@ -355,6 +361,9 @@ public class CatalogJanitor extends ScheduledChore {
 " -- no longer hold references");
   ProcedureExecutor pe = 
this.services.getMasterProcedureExecutor();
   pe.submitProcedure(new GCRegionProcedure(pe.getEnvironment(), parent));
+  // Remove from in-memory states
+  
this.services.getAssignmentManager().getRegionStates().deleteRegion(parent);
+  this.services.getServerManager().removeRegion(parent);
   return true;
 }
 return false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/71a9a9a9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index c9c792a..f0e9b88 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -1028,6 +1028,13 @@ public class ServerManager {
 flushedSequenceIdByRegion.remove(encodedName);
   }
 
+  @VisibleForTesting
+  public boolean isRegionInServerManagerStates(final HRegionInfo hri) {
+final byte[] encodedName = hri.getEncodedNameAsBytes();
+return (storeFlushedSequenceIdsByRegion.containsKey(encodedName)
+|| flushedSequenceIdByRegion.containsKey(encodedName));
+  }
+
   /**
* Called by delete table and similar to notify the ServerManager that a 
region was removed.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/71a9a9a9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
 

hbase git commit: HBASE-18565 [C++] Fix deadlock in AsyncScanRetryingCaller and other RPCs

2017-08-11 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14850 e5643e863 -> e2a1cad3e


HBASE-18565 [C++] Fix deadlock in AsyncScanRetryingCaller and other RPCs


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e2a1cad3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e2a1cad3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e2a1cad3

Branch: refs/heads/HBASE-14850
Commit: e2a1cad3e3b8e75c3569eda07d20c18155549009
Parents: e5643e8
Author: Enis Soztutar 
Authored: Fri Aug 11 12:23:43 2017 -0700
Committer: Enis Soztutar 
Committed: Fri Aug 11 12:23:43 2017 -0700

--
 hbase-native-client/core/async-rpc-retrying-caller.cc  | 4 ++--
 hbase-native-client/core/async-scan-rpc-retrying-caller.cc | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e2a1cad3/hbase-native-client/core/async-rpc-retrying-caller.cc
--
diff --git a/hbase-native-client/core/async-rpc-retrying-caller.cc 
b/hbase-native-client/core/async-rpc-retrying-caller.cc
index cb058b1..8e60991 100644
--- a/hbase-native-client/core/async-rpc-retrying-caller.cc
+++ b/hbase-native-client/core/async-rpc-retrying-caller.cc
@@ -148,9 +148,9 @@ void AsyncSingleRequestRpcRetryingCaller::OnError(
* establishment time (see ConnectionFactory::Connect()), otherwise, the 
IOThreadPool thread
* just hangs because it deadlocks itself.
*/
-  conn_->retry_executor()->add([&]() {
+  conn_->retry_executor()->add([=]() {
 retry_timer_->scheduleTimeoutFn(
-[this]() { conn_->cpu_executor()->add([&]() { LocateThenCall(); }); },
+[=]() { conn_->cpu_executor()->add([&]() { LocateThenCall(); }); },
 std::chrono::milliseconds(TimeUtil::ToMillis(delay_ns)));
   });
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e2a1cad3/hbase-native-client/core/async-scan-rpc-retrying-caller.cc
--
diff --git a/hbase-native-client/core/async-scan-rpc-retrying-caller.cc 
b/hbase-native-client/core/async-scan-rpc-retrying-caller.cc
index fbdf17a..a1e8362 100644
--- a/hbase-native-client/core/async-scan-rpc-retrying-caller.cc
+++ b/hbase-native-client/core/async-scan-rpc-retrying-caller.cc
@@ -406,6 +406,7 @@ void AsyncScanRpcRetryingCaller::Call() {
   ->AsyncCall(region_location_->server_name().host_name(),
   region_location_->server_name().port(), std::move(req),
   security::User::defaultUser(), "ClientService")
+  .via(conn_->cpu_executor().get())
   .then([self, this](const std::unique_ptr& resp) {
 auto scan_resp = 
std::static_pointer_cast(resp->resp_msg());
 return OnComplete(controller_, scan_resp, resp->cell_scanner());



hbase git commit: HBASE-18197 Avoided to call job.waitForCompletion(true) two times

2017-08-11 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 a732b67ce -> b9a57f455


HBASE-18197 Avoided to call job.waitForCompletion(true) two times

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b9a57f45
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b9a57f45
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b9a57f45

Branch: refs/heads/branch-1.2
Commit: b9a57f45578f0d21aab04d95c241d7c7e80e2552
Parents: a732b67
Author: Jan Hentschel 
Authored: Sat Jun 10 22:17:00 2017 +0200
Committer: Chia-Ping Tsai 
Committed: Sat Aug 12 02:46:24 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b9a57f45/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index c7bdac9..9b5b2af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -559,6 +559,6 @@ public class Import {
   }
 }
 
-System.exit(job.waitForCompletion(true) ? 0 : 1);
+System.exit(isJobSuccessful ? 0 : 1);
   }
 }



hbase git commit: HBASE-18197 Avoided to call job.waitForCompletion(true) two times

2017-08-11 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 a2a28a780 -> 18726b370


HBASE-18197 Avoided to call job.waitForCompletion(true) two times

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/18726b37
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/18726b37
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/18726b37

Branch: refs/heads/branch-1.3
Commit: 18726b370e0f9c178ae2b7a82803f3c53b6fd2eb
Parents: a2a28a7
Author: Jan Hentschel 
Authored: Sat Jun 10 22:17:00 2017 +0200
Committer: Chia-Ping Tsai 
Committed: Sat Aug 12 02:47:00 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/18726b37/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index 9c8e02f..e758b88 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -749,6 +749,6 @@ public class Import {
   }
 }
 
-System.exit(job.waitForCompletion(true) ? 0 : 1);
+System.exit(isJobSuccessful ? 0 : 1);
   }
 }



hbase git commit: HBASE-18197 Avoided to call job.waitForCompletion(true) two times

2017-08-11 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 a1438e1be -> 226d8026b


HBASE-18197 Avoided to call job.waitForCompletion(true) two times

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/226d8026
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/226d8026
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/226d8026

Branch: refs/heads/branch-1.4
Commit: 226d8026b7b984c1f65b2917df2f93d83b0a7e73
Parents: a1438e1
Author: Jan Hentschel 
Authored: Sat Jun 10 22:17:00 2017 +0200
Committer: Chia-Ping Tsai 
Committed: Sat Aug 12 02:47:13 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/226d8026/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index d315b81..8a80d15 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -750,6 +750,6 @@ public class Import {
   }
 }
 
-System.exit(job.waitForCompletion(true) ? 0 : 1);
+System.exit(isJobSuccessful ? 0 : 1);
   }
 }



hbase git commit: HBASE-18197 Avoided to call job.waitForCompletion(true) two times

2017-08-11 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1 a03c2036e -> b181f172e


HBASE-18197 Avoided to call job.waitForCompletion(true) two times

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b181f172
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b181f172
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b181f172

Branch: refs/heads/branch-1
Commit: b181f172ef92ff1fb3bf2a2907c7c143bcd5ac75
Parents: a03c203
Author: Jan Hentschel 
Authored: Sat Jun 10 22:17:00 2017 +0200
Committer: Chia-Ping Tsai 
Committed: Sat Aug 12 02:47:25 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b181f172/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index d315b81..8a80d15 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -750,6 +750,6 @@ public class Import {
   }
 }
 
-System.exit(job.waitForCompletion(true) ? 0 : 1);
+System.exit(isJobSuccessful ? 0 : 1);
   }
 }



hbase git commit: HBASE-18537 [C++] Improvements to load-client

2017-08-11 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14850 9786a07ee -> e5643e863


HBASE-18537 [C++] Improvements to load-client


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e5643e86
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e5643e86
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e5643e86

Branch: refs/heads/HBASE-14850
Commit: e5643e8632f8d4c916f1e1ca9537612b7370ace1
Parents: 9786a07
Author: Enis Soztutar 
Authored: Fri Aug 11 11:09:34 2017 -0700
Committer: Enis Soztutar 
Committed: Fri Aug 11 11:09:34 2017 -0700

--
 hbase-native-client/core/load-client.cc   | 213 +++--
 hbase-native-client/core/simple-client.cc |   4 +-
 2 files changed, 135 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e5643e86/hbase-native-client/core/load-client.cc
--
diff --git a/hbase-native-client/core/load-client.cc 
b/hbase-native-client/core/load-client.cc
index 67f0d57..8cceeef 100644
--- a/hbase-native-client/core/load-client.cc
+++ b/hbase-native-client/core/load-client.cc
@@ -63,16 +63,17 @@ static constexpr const char *appendPrefix = "a";
 
 std::string PrefixZero(int total_width, int num) {
   std::string str = std::to_string(num);
-  auto prefix_len = total_width - str.length();
-  if (prefix_len > 0) return std::string(total_width - str.length(), '0') + 
str;
+  int prefix_len = total_width - str.length();
+  if (prefix_len > 0) {
+return std::string(prefix_len, '0') + str;
+  }
   return str;
 }
 
 bool Verify(std::shared_ptr result, std::string family, int m) {
   auto col = std::to_string(m);
-  auto int_val = hbase::BytesUtil::ToInt64(*(result->Value(family, incrPrefix 
+ col)));
-  if (int_val != m) {
-LOG(ERROR) << "value is not " << col << " for " << result->Row();
+  if (!result->Value(family, col)) {
+LOG(ERROR) << "Column:" << col << " is not found for " << result->Row();
 return false;
   }
   auto l = *(result->Value(family, col));
@@ -80,11 +81,52 @@ bool Verify(std::shared_ptr result, 
std::string family, int m) {
 LOG(ERROR) << "value " << *(result->Value(family, "1")) << " is not " << 
col;
 return false;
   }
-  l = *(result->Value(family, appendPrefix + col));
-  if (l != col) {
-LOG(ERROR) << "value " << *(result->Value(family, "1")) << " is not " << 
col;
+  if (FLAGS_appends) {
+if (!result->Value(family, incrPrefix + col)) {
+  LOG(ERROR) << "Column:" << (incrPrefix + col) << " is not found for " << 
result->Row();
+  return false;
+}
+auto int_val = hbase::BytesUtil::ToInt64(*(result->Value(family, 
incrPrefix + col)));
+if (int_val != m) {
+  LOG(ERROR) << "value is not " << col << " for " << result->Row();
+  return false;
+}
+if (!result->Value(family, appendPrefix + col)) {
+  LOG(ERROR) << "Column:" << (appendPrefix + col) << " is not found for " 
<< result->Row();
+  return false;
+}
+l = *(result->Value(family, appendPrefix + col));
+if (l != col) {
+  LOG(ERROR) << "value " << *(result->Value(family, "1")) << " is not " << 
col;
+  return false;
+}
+  }
+
+  return true;
+}
+
+bool Verify(std::shared_ptr result, const std::string ,
+const std::vector ) {
+  if (result == nullptr || result->IsEmpty()) {
+LOG(ERROR) << "didn't get result";
 return false;
   }
+  if (result->Row().compare(row) != 0) {
+LOG(ERROR) << "row " << result->Row() << " is not the expected: " << row;
+return false;
+  }
+  // Test the values
+  for (auto family : families) {
+if (!result->Value(family, kNumColumn)) {
+  LOG(ERROR) << "Column:" << kNumColumn << " is not found for " << 
result->Row();
+  return false;
+}
+auto cols = std::stoi(*(result->Value(family, kNumColumn)));
+VLOG(3) << "Result for row:" << row << " contains " << 
std::to_string(cols) << " columns";
+for (int m = 1; m <= cols; m++) {
+  if (!Verify(result, family, m)) return false;
+}
+  }
   return true;
 }
 
@@ -95,35 +137,46 @@ bool DoScan(int iteration, uint64_t max_row, uint64_t 
rows, std::unique_ptrScan(scan);
 
-  auto cnt = start;
+  auto cnt = 0;
   auto r = scanner->Next();
   while (r != nullptr) {
-auto row = PrefixZero(width, cnt);
-if (r->Row().compare(row) != 0) {
-  LOG(ERROR) << "row " << r->Row() << " is not the expected: " << row;
+auto row = PrefixZero(width, start + cnt);
+if (!Verify(r, row, families)) {
   return false;
 }
-for (auto family : families) {
-  auto cols = std::stoi(*(r->Value(family, kNumColumn)));
-  VLOG(3) << "scan gets " << std::to_string(cols) << " columns";
-  for (int m = 1; m <= cols; m++) {
-if 

hbase git commit: HBASE-18564 [C++] Problems compiling with GCC

2017-08-11 Thread enis
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14850 5261c67b5 -> 9786a07ee


HBASE-18564 [C++] Problems compiling with GCC


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9786a07e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9786a07e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9786a07e

Branch: refs/heads/HBASE-14850
Commit: 9786a07ee0625a722f546da150182e9c88e7
Parents: 5261c67
Author: Enis Soztutar 
Authored: Fri Aug 11 10:47:56 2017 -0700
Committer: Enis Soztutar 
Committed: Fri Aug 11 10:47:56 2017 -0700

--
 hbase-native-client/Makefile|  2 +-
 .../core/async-batch-rpc-retrying-test.cc   |  1 -
 .../core/async-rpc-retrying-test.cc |  1 -
 hbase-native-client/core/client-test.cc | 27 +++
 hbase-native-client/core/filter-test.cc |  2 --
 .../core/hbase-configuration-test.cc| 35 ++--
 .../core/location-cache-retry-test.cc   |  2 +-
 hbase-native-client/core/result-test.cc |  3 +-
 8 files changed, 36 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9786a07e/hbase-native-client/Makefile
--
diff --git a/hbase-native-client/Makefile b/hbase-native-client/Makefile
index 143c00c..708e907 100644
--- a/hbase-native-client/Makefile
+++ b/hbase-native-client/Makefile
@@ -38,7 +38,7 @@ LINKFLAG := -shared
 
 #define list of source files and object files
 ALLSRC := $(foreach sdir,$(SRC_DIR),$(wildcard $(sdir)/*.cc))
-EXCLUDE_SRC := $(foreach sdir,$(SRC_DIR),$(wildcard $(sdir)/*-test.cc)) 
core/simple-client.cc
+EXCLUDE_SRC := $(foreach sdir,$(SRC_DIR),$(wildcard $(sdir)/*-test.cc)) 
core/simple-client.cc core/load-client.cc
 SRC := $(filter-out $(EXCLUDE_SRC), $(ALLSRC))
 PROTOSRC := $(patsubst %.proto, $(addprefix $(BUILD_PATH)/,%.pb.cc),$(wildcard 
if/*.proto))
 PROTOHDR := $(patsubst %.proto, $(addprefix $(BUILD_PATH)/,%.pb.h),$(wildcard 
if/*.proto))

http://git-wip-us.apache.org/repos/asf/hbase/blob/9786a07e/hbase-native-client/core/async-batch-rpc-retrying-test.cc
--
diff --git a/hbase-native-client/core/async-batch-rpc-retrying-test.cc 
b/hbase-native-client/core/async-batch-rpc-retrying-test.cc
index c186276..0d186b4 100644
--- a/hbase-native-client/core/async-batch-rpc-retrying-test.cc
+++ b/hbase-native-client/core/async-batch-rpc-retrying-test.cc
@@ -296,7 +296,6 @@ void runMultiTest(std::shared_ptr 
region_locator,
 
   // Get connection to HBase Table
   auto table = client.Table(tn);
-  ASSERT_TRUE(table) << "Unable to get connection to Table.";
 
   for (uint64_t i = 0; i < num_rows; i++) {
 table->Put(Put{"test" + std::to_string(i)}.AddColumn("d", 
std::to_string(i),

http://git-wip-us.apache.org/repos/asf/hbase/blob/9786a07e/hbase-native-client/core/async-rpc-retrying-test.cc
--
diff --git a/hbase-native-client/core/async-rpc-retrying-test.cc 
b/hbase-native-client/core/async-rpc-retrying-test.cc
index f887815..95b7143 100644
--- a/hbase-native-client/core/async-rpc-retrying-test.cc
+++ b/hbase-native-client/core/async-rpc-retrying-test.cc
@@ -304,7 +304,6 @@ void runTest(std::shared_ptr 
region_locator, std::string
 
   // Get connection to HBase Table
   auto table = client.Table(tn);
-  ASSERT_TRUE(table) << "Unable to get connection to Table.";
 
   table->Put(Put{"test2"}.AddColumn("d", "2", "value2"));
   table->Put(Put{"test2"}.AddColumn("d", "extra", "value for extra"));

http://git-wip-us.apache.org/repos/asf/hbase/blob/9786a07e/hbase-native-client/core/client-test.cc
--
diff --git a/hbase-native-client/core/client-test.cc 
b/hbase-native-client/core/client-test.cc
index 9efe0b6..1c9b709 100644
--- a/hbase-native-client/core/client-test.cc
+++ b/hbase-native-client/core/client-test.cc
@@ -42,6 +42,7 @@ using hbase::RetriesExhaustedException;
 using hbase::Put;
 using hbase::Table;
 using hbase::TestUtil;
+using std::experimental::nullopt;
 
 class ClientTest : public ::testing::Test {
  public:
@@ -134,7 +135,7 @@ TEST_F(ClientTest, Append) {
 
   // Get connection to HBase Table
   auto table = client.Table(tn);
-  ASSERT_TRUE(table) << "Unable to get connection to Table.";
+  ASSERT_TRUE(table != nullptr) << "Unable to get connection to Table.";
   std::string val1 = "a";
   auto result = table->Append(hbase::Append{row}.Add("d", "1", val1));
 
@@ -164,7 +165,7 @@ TEST_F(ClientTest, PutGetDelete) {
 
   // Get connection to HBase Table
   auto table = client.Table(tn);
-  ASSERT_TRUE(table) << "Unable 

[49/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/deprecated-list.html
--
diff --git a/apidocs/deprecated-list.html b/apidocs/deprecated-list.html
index daa6cc4..ac84380 100644
--- a/apidocs/deprecated-list.html
+++ b/apidocs/deprecated-list.html
@@ -561,95 +561,90 @@
 
 
 
-org.apache.hadoop.hbase.client.Table.getWriteBufferSize()
-as of 1.0.1 (should not 
have been in 1.0.0). Replaced by BufferedMutator.getWriteBufferSize()
-
-
-
 org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initCredentialsForCluster(Job,
 String)
 Since 1.2.0, use TableMapReduceUtil.initCredentialsForCluster(Job,
 Configuration) instead.
 
 
-
+
 org.apache.hadoop.hbase.client.Get.isClosestRowBefore()
 since 2.0.0 and will be 
removed in 3.0.0
 
 
-
+
 org.apache.hadoop.hbase.HColumnDescriptor.isLegalFamilyName(byte[])
 Use ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(byte[]).
 
 
-
+
 org.apache.hadoop.hbase.client.Result.isPartial()
 the word 'partial' 
ambiguous, use Result.mayHaveMoreCellsInRow()
 instead.
  Deprecated since 1.4.0.
 
 
-
+
 org.apache.hadoop.hbase.client.Scan.isSmall()
 since 2.0.0. See the 
comment of Scan.setSmall(boolean)
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listPeerConfigs()
 use Admin.listReplicationPeers()
 instead
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listReplicated()
 use Admin.listReplicatedTableCFs()
 instead
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTableDescriptorsByNamespace(String)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptorsByNamespace(byte[])
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTables()
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptors()
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTables(Pattern)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptors(java.util.regex.Pattern)
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTables(Pattern,
 boolean)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptors(java.util.regex.Pattern,
 boolean)
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTables(String)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptors(java.lang.String)
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTables(String,
 boolean)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptors(java.lang.String,
 boolean)
 
 
-
+
 org.apache.hadoop.hbase.CellUtil.matchingRow(Cell,
 Cell)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  Instead use CellUtil.matchingRows(Cell,
 Cell)
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.mergeRegions(byte[],
 byte[], boolean)
 Since 2.0. Will be removed 
in 3.0. Use
  Admin.mergeRegionsAsync(byte[],
 byte[], boolean) instead.
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.modifyColumn(TableName,
 HColumnDescriptor)
 As of release 2.0.0.
  (https://issues.apache.org/jira/browse/HBASE-1989;>HBASE-1989).
@@ -657,129 +652,153 @@
  Use Admin.modifyColumnFamily(TableName,
 ColumnFamilyDescriptor).
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.modifyTable(TableName,
 HTableDescriptor)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.modifyTable(TableDescriptor)
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.modifyTableAsync(TableName,
 HTableDescriptor)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.modifyTableAsync(TableDescriptor)
 
 
-
+
 org.apache.hadoop.hbase.ServerName.parseHostname(String)
 Since 2.0. Use ServerName.valueOf(String)
 
 
-
+
 org.apache.hadoop.hbase.ServerName.parsePort(String)
 Since 2.0. Use ServerName.valueOf(String)
 
 
-
+
 org.apache.hadoop.hbase.ServerName.parseStartcode(String)
 Since 2.0. Use ServerName.valueOf(String)
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.parseTableCFsFromConfig(String)
 as release of 2.0.0, and 
it will be removed in 3.0.0
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.peerAdded(String)
 
-
+
 org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[],
 ListPut)
 Use HTableMultiplexer.put(TableName,
 List) instead.
 
 
-
+
 org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[],
 Put)
 Use HTableMultiplexer.put(TableName,
 Put) instead.
 
 
-
+
 org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[],
 Put, int)
 Use HTableMultiplexer.put(TableName,
 Put) instead.
 
 
-
+
 org.apache.hadoop.hbase.util.Bytes.putIntUnsafe(byte[],
 int, int)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
 
 
-
+
 org.apache.hadoop.hbase.util.Bytes.putLongUnsafe(byte[],
 int, long)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
 
 
-
+
 

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
index ec8d970..78ff5df 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
@@ -332,7 +332,7 @@ implements MetricsRegionServerSource
-APPEND_KEY,
 AVERAGE_REGION_SIZE,
 AVERAGE_REGION_SIZE_DESC,
 AVG_STORE_FILE_AGE,
 AVG_STORE_FILE_AGE_DESC,
 BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT,
 BLOCK_CAC
 HE_BLOOM_CHUNK_MISS_COUNT, BLOCK_CACHE_COUNT,
 BLOCK_CACHE_COUNT_DESC,
 BLOCK_CACHE_DATA_HIT_COUNT,
 BLOCK_CACHE_DATA_MISS_COUNT,
 BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT,
 BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT,
 BLOCK_CACHE_ENCODED_DATA_HIT_COUNT,
 BLOCK_CACHE_ENCODED_DATA_MISS_COUNT,
 BLOCK_CACHE_EVICTION_COUNT,
 BLOCK_CACHE_EVICTION_COUNT_DESC,
 BLOCK_CACHE_EXPRESS_HIT_PERCENT,
 BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC,
 BLOCK_CACHE_FAILED_INSERTION_COUNT,
 BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC,
 BLOCK_CACHE_FILE_INFO_HIT_COUNT,
 BLOCK_CACHE_FILE_INFO_MISS_COUNT,
 BLOCK_CACHE_FREE_DESC,
 BLOCK_CACHE_FREE_SIZE,
 BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT,
 BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT,
 BLOCK_CACHE_HIT_COUNT,
 BLOCK_CACHE_HIT_COUNT_DESC,
 BLOCK_CACHE_HIT_PERCENT,
 BLOCK_CACHE_HIT_PERCENT_DESC,
 BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT,
 BLOCK_CACHE_LEAF_INDEX_HIT_COUNT,
 BLOCK_CACHE_LEAF_INDEX_MISS_COUNT,
 BLOCK_CACHE_META_HIT_COUNT,
 BLOCK_CACHE_META_MISS_COUNT,
 BLOCK_CACHE_MISS_COUNT, BLOCK_CACHE_PRIMARY_EVICTION_COUNT,
 BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC,
 BLOCK_CACHE_PRIMARY_HIT_COUNT,
 BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC,
 BLOCK_CACHE_PRIMARY_MISS_COUNT,
 B
 LOCK_CACHE_ROOT_INDEX_HIT_COUNT, BLOCK_CACHE_ROOT_INDEX_MISS_COUNT,
 BLOCK_CACHE_SIZE,
 BLOCK_CACHE_SIZE_DESC,
 BLOCK_CACHE_TRAILER_HIT_COUNT,
 BLOCK_CACHE_TRAILER_MISS_COUNT,
 BLOCK_COUNT_MISS_COUNT_DESC,
 BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC,
 BLOCKED_REQUESTS_COUNT,
 BLOCKED_REQUESTS_COUNT_DESC,
 CELLS_COUNT_COMPACTED_FROM_MOB,
 CELLS_COUNT_COMPACTED_FROM_MOB_DESC,
 CELLS_COUNT_COMPACTED_TO_MOB,
 CELLS_COUNT_COMPACTED_TO_MOB_DESC, CELLS_SIZE_COMPACTED_FROM_MOB,
 CELLS_SIZE_COMPACTED_FROM_MOB_DESC,
 CELLS_SIZE_COMPACTED_TO_MOB,
 CELLS_SIZE_COMPACTED_TO_MOB_DESC,
 CHECK_AND_DELETE_KEY,
 CHECK_AND_PUT_KEY,
 CHECK_MUTATE_FAILED_COUNT,
 CHECK_MUTATE_FAILED_COUNT_DESC,
 CHECK_MUTATE_PASSED_COUNT,
 CHECK_MUTATE_PASSED_COUNT_DESC,
 CLUSTER_ID_DESC,
 CLUSTER_ID_NAME,
 COMPACTED_CE
 LLS, COMPACTED_CELLS_DESC,
 COMPACTED_CELLS_SIZE,
 COMPACTED_CELLS_SIZE_DESC,
 COMPACTED_INPUT_BYTES,
 COMPACTED_INPUT_BYTES_DESC,
 COMPACTED_OUTPUT_BYTES,
 COMPACTED_OUTPUT_BYTES_DESC, href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_INPUT_FILE_COUNT">COMPACTION_INPUT_FILE_COUNT,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_INPUT_FILE_COUNT_DESC">COMPACTION_INPUT_FILE_COUNT_DESC,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_INPUT_SIZE">COMPACTION_INPUT_SIZE,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_INPUT_SIZE_DESC">COMPACTION_INPUT_SIZE_DESC,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_OUTPUT_FILE_COUNT">COMPACTION_OUTPUT_FILE_COUNT,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html#COMPACTION_OUTPUT_FILE_COUNT_DESC">COMPACTION_OUTPUT_FILE_COUNT_DESC,
 > COMPACTION_OUTPUT_SIZE,
 COMPACTION_OUTPUT_SIZE_DESC,
 COMPACTION_QUEUE_LENGTH,
 COMPACTION_QUEUE_LENGTH_DESC,
 COMPACTION_TIME,
 COMPACTION_TIME_DESC,
 DATA_SIZE_WITHOUT_WAL,
 DATA_SIZE_WITHOUT_WAL_DESC,
 DELETE_BATCH_KEY,
 DELETE_KEY,
 FILTERED_READ_REQUEST_COUNT,
 FILTERED_READ_REQUEST_COUNT_DESC,
 FLUSH_MEMSTORE_SIZE,
 FLUSH_MEMSTORE_SIZE_DESC,
 FLUSH_OUTPUT_SIZE,
 FLUSH_OUTPUT_SIZE_DESC,
 FLUSH_QUEUE_LENGTH,
 FLUSH_QUEUE_LENGTH_DESC,
 FLUSH_TIME,
 FLUSH_TIME_DESC,
 FLUSHED_CELLS,
 FLUSHED_CELLS_DESC, FLUSHED_CELLS_SIZE,
 FLUSHED_CELLS_SIZE_DESC,
 FLUSHED_MEMSTORE_BYTES,
 FLUSHED_MEMSTORE_BYTES_DESC,
 FLUSHED_OUTPUT_BYTES,
 FLUSHED_OUTPUT_BYTES_DESC,
 GET_KEY, GET_SIZE_KEY,
 HEDGED_READ_WINS,
 HEDGED_READ_WINS_DESC,
 HEDGED_READS,
 HEDGED_READS_DESC,
 INCREMENT_KEY,
 LARGE_COMPACTION_QUEUE_LENGTH
 , LARGE_COMPACTION_QUEUE_LENGTH_DESC,
 MAJOR_COMPACTED_CELLS,
 MAJOR_COMPACTED_CELLS_DESC,
 MAJOR_COMPACTED_CELLS_SIZE,
 MAJOR_COMPACTED_CELLS_SIZE_DESC,
 MAJOR_COMPACTED_INPUT_BYTES,
 MAJOR_COMPACTED_INPUT_BYTES_DESC,
 

[38/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/deprecated-list.html
--
diff --git a/devapidocs/deprecated-list.html b/devapidocs/deprecated-list.html
index 350c5ad..ad2a40d 100644
--- a/devapidocs/deprecated-list.html
+++ b/devapidocs/deprecated-list.html
@@ -976,140 +976,135 @@
 
 
 
-org.apache.hadoop.hbase.client.Table.getWriteBufferSize()
-as of 1.0.1 (should not 
have been in 1.0.0). Replaced by BufferedMutator.getWriteBufferSize()
-
-
-
 org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initCredentialsForCluster(Job,
 String)
 Since 1.2.0, use TableMapReduceUtil.initCredentialsForCluster(Job,
 Configuration) instead.
 
 
-
+
 org.apache.hadoop.hbase.regionserver.HRegion.initialize()
 use 
HRegion.createHRegion() or HRegion.openHRegion()
 
 
-
+
 org.apache.hadoop.hbase.client.Get.isClosestRowBefore()
 since 2.0.0 and will be 
removed in 3.0.0
 
 
-
+
 org.apache.hadoop.hbase.KeyValue.iscreate(InputStream)
 
-
+
 org.apache.hadoop.hbase.client.ClusterConnection.isDeadServer(ServerName)
 internal method, do not 
use thru ClusterConnection
 
 
-
+
 org.apache.hadoop.hbase.KeyValue.isDelete()
 
-
+
 org.apache.hadoop.hbase.HColumnDescriptor.isLegalFamilyName(byte[])
 Use ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(byte[]).
 
 
-
+
 org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate.isLogDeletable(FileStatus)
 
-
+
 org.apache.hadoop.hbase.client.ConnectionImplementation.isMasterRunning()
 this has been deprecated 
without a replacement
 
 
-
+
 org.apache.hadoop.hbase.client.ClusterConnection.isMasterRunning()
 this has been deprecated 
without a replacement
 
 
-
+
 org.apache.hadoop.hbase.client.Result.isPartial()
 the word 'partial' 
ambiguous, use Result.mayHaveMoreCellsInRow()
 instead.
  Deprecated since 1.4.0.
 
 
-
+
 org.apache.hadoop.hbase.client.Scan.isSmall()
 since 2.0.0. See the 
comment of Scan.setSmall(boolean)
 
 
-
+
 org.apache.hadoop.hbase.security.visibility.VisibilityClient.listLabels(Configuration,
 String)
 Use VisibilityClient.listLabels(Connection,String)
 instead.
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listPeerConfigs()
 use Admin.listReplicationPeers()
 instead
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listReplicated()
 use Admin.listReplicatedTableCFs()
 instead
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.listReplicationPeers()
 use Admin.listReplicationPeers()
 instead
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTableDescriptorsByNamespace(String)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptorsByNamespace(byte[])
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTables()
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptors()
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTables(Pattern)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptors(java.util.regex.Pattern)
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTables(Pattern,
 boolean)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptors(java.util.regex.Pattern,
 boolean)
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTables(String)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptors(java.lang.String)
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.listTables(String,
 boolean)
 since 2.0 version and will 
be removed in 3.0 version.
  use Admin.listTableDescriptors(java.lang.String,
 boolean)
 
 
-
+
 org.apache.hadoop.hbase.CellUtil.matchingRow(Cell,
 Cell)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  Instead use CellUtil.matchingRows(Cell,
 Cell)
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.mergeRegions(byte[],
 byte[], boolean)
 Since 2.0. Will be removed 
in 3.0. Use
  Admin.mergeRegionsAsync(byte[],
 byte[], boolean) instead.
 
 
-
+
 org.apache.hadoop.hbase.client.HBaseAdmin.mergeRegions(byte[],
 byte[], boolean)
 Since 2.0. Will be removed 
in 3.0. Use
  HBaseAdmin.mergeRegionsAsync(byte[],
 byte[], boolean) instead.
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.modifyColumn(TableName,
 HColumnDescriptor)
 As of release 2.0.0.
  (https://issues.apache.org/jira/browse/HBASE-1989;>HBASE-1989).
@@ -1117,537 +1112,561 @@
  Use Admin.modifyColumnFamily(TableName,
 ColumnFamilyDescriptor).
 
 
-
+
 org.apache.hadoop.hbase.client.HBaseAdmin.modifyColumn(TableName,
 HColumnDescriptor)
 As of 2.0. Will be removed 
in 3.0. Use
  HBaseAdmin.modifyColumnFamily(TableName,
 ColumnFamilyDescriptor) instead.
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.modifyTable(TableName,
 HTableDescriptor)
 since 2.0 version and will 
be removed in 3.0 version.
  use 

[44/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
index 8f0943d..63ba3f6 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
@@ -601,634 +601,657 @@
 593  /**
 594   * Get all available versions.
 595   * @return this
-596   */
-597  public Scan setMaxVersions() {
-598this.maxVersions = 
Integer.MAX_VALUE;
-599return this;
-600  }
-601
-602  /**
-603   * Get up to the specified number of 
versions of each column.
-604   * @param maxVersions maximum versions 
for each column
-605   * @return this
-606   */
-607  public Scan setMaxVersions(int 
maxVersions) {
-608this.maxVersions = maxVersions;
-609return this;
-610  }
-611
-612  /**
-613   * Set the maximum number of cells to 
return for each call to next(). Callers should be aware
-614   * that this is not equivalent to 
calling {@link #setAllowPartialResults(boolean)}.
-615   * If you don't allow partial results, 
the number of cells in each Result must equal to your
-616   * batch setting unless it is the last 
Result for current row. So this method is helpful in paging
-617   * queries. If you just want to prevent 
OOM at client, use setAllowPartialResults(true) is better.
-618   * @param batch the maximum number of 
values
-619   * @see 
Result#mayHaveMoreCellsInRow()
-620   */
-621  public Scan setBatch(int batch) {
-622if (this.hasFilter()  
this.filter.hasFilterRow()) {
-623  throw new 
IncompatibleFilterException(
-624"Cannot set batch on a scan using 
a filter" +
-625" that returns true for 
filter.hasFilterRow");
-626}
-627this.batch = batch;
-628return this;
-629  }
-630
-631  /**
-632   * Set the maximum number of values to 
return per row per Column Family
-633   * @param limit the maximum number of 
values returned / row / CF
-634   */
-635  public Scan 
setMaxResultsPerColumnFamily(int limit) {
-636this.storeLimit = limit;
-637return this;
-638  }
-639
-640  /**
-641   * Set offset for the row per Column 
Family.
-642   * @param offset is the number of kvs 
that will be skipped.
+596   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+597   * {@link 
#readAllVersions()} instead.
+598   */
+599  @Deprecated
+600  public Scan setMaxVersions() {
+601return readAllVersions();
+602  }
+603
+604  /**
+605   * Get up to the specified number of 
versions of each column.
+606   * @param maxVersions maximum versions 
for each column
+607   * @return this
+608   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+609   * {@link 
#readVersions(int)} instead.
+610   */
+611  @Deprecated
+612  public Scan setMaxVersions(int 
maxVersions) {
+613return readVersions(maxVersions);
+614  }
+615
+616  /**
+617   * Get all available versions.
+618   * @return this
+619   */
+620  public Scan readAllVersions() {
+621this.maxVersions = 
Integer.MAX_VALUE;
+622return this;
+623  }
+624
+625  /**
+626   * Get up to the specified number of 
versions of each column.
+627   * @param versions specified number of 
versions for each column
+628   * @return this
+629   */
+630  public Scan readVersions(int versions) 
{
+631this.maxVersions = versions;
+632return this;
+633  }
+634
+635  /**
+636   * Set the maximum number of cells to 
return for each call to next(). Callers should be aware
+637   * that this is not equivalent to 
calling {@link #setAllowPartialResults(boolean)}.
+638   * If you don't allow partial results, 
the number of cells in each Result must equal to your
+639   * batch setting unless it is the last 
Result for current row. So this method is helpful in paging
+640   * queries. If you just want to prevent 
OOM at client, use setAllowPartialResults(true) is better.
+641   * @param batch the maximum number of 
values
+642   * @see 
Result#mayHaveMoreCellsInRow()
 643   */
-644  public Scan 
setRowOffsetPerColumnFamily(int offset) {
-645this.storeOffset = offset;
-646return this;
-647  }
-648
-649  /**
-650   * Set the number of rows for caching 
that will be passed to scanners.
-651   * If not set, the Configuration 
setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
-652   * apply.
-653   * Higher caching values will enable 
faster scanners but will use more memory.
-654   * @param caching the number of rows 
for caching
-655   */
-656  public Scan setCaching(int caching) {
-657this.caching = caching;
-658return this;
-659  }
-660
-661  /**
-662   * @return the maximum result size in 
bytes. See {@link #setMaxResultSize(long)}
-663   */
-664  public long 

[19/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
index 9a49ec0..b74ce5f 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
@@ -267,54 +267,58 @@ implements requestCount
 
 
+(package private) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">LongAdder
+requestRowActionCount
+
+
 private int
 rowSizeWarnThreshold
 Row size threshold for multi requests above which a warning 
is logged
 
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">LongAdder
 rpcGetRequestCount
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">LongAdder
 rpcMultiRequestCount
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">LongAdder
 rpcMutateRequestCount
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">LongAdder
 rpcScanRequestCount
 
-
+
 (package private) RpcServerInterface
 rpcServer
 
-
+
 private int
 rpcTimeout
 The RPC timeout period (milliseconds)
 
 
-
+
 private static http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 SCANNER_ALREADY_CLOSED
 Deprecated.
 
 
-
+
 private ScannerIdGenerator
 scannerIdGenerator
 
-
+
 private int
 scannerLeaseTimeoutPeriod
 The lease timeout period for client scanners 
(milliseconds).
 
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true;
 title="class or interface in java.util.concurrent">ConcurrentMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,RSRpcServices.RegionScannerHolder
 scanners
 
@@ -971,7 +975,16 @@ implements 
 
 requestCount
-finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">LongAdder requestCount
+finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">LongAdder requestCount
+
+
+
+
+
+
+
+requestRowActionCount
+finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">LongAdder requestRowActionCount
 
 
 
@@ -980,7 +993,7 @@ implements 
 
 rpcGetRequestCount
-finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">LongAdder rpcGetRequestCount
+finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">LongAdder rpcGetRequestCount
 
 
 
@@ -989,7 +1002,7 @@ implements 
 
 rpcScanRequestCount
-finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">LongAdder rpcScanRequestCount
+finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">LongAdder rpcScanRequestCount
 
 
 
@@ -998,7 +1011,7 @@ implements 
 
 rpcMultiRequestCount
-finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">LongAdder rpcMultiRequestCount
+finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">LongAdder rpcMultiRequestCount
 
 
 
@@ -1007,7 +1020,7 @@ implements 
 
 rpcMutateRequestCount
-finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">LongAdder rpcMutateRequestCount

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
index 9bc6240..2a20fad 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":42,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":42,"i69":42,"i70":42,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":42,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":42,"i58":42,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":42,"i71":42,"i72":42,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -598,63 +598,75 @@ extends numFamilies()
 
 
+Scan
+readAllVersions()
+Get all available versions.
+
+
+
+Scan
+readVersions(intversions)
+Get up to the specified number of versions of each 
column.
+
+
+
 (package private) Scan
 resetMvccReadPoint()
 Set the mvcc read point to -1 which means do not use 
it.
 
 
-
+
 Scan
 setACL(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,Permissionperms)
 
-
+
 Scan
 setACL(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringuser,
   Permissionperms)
 
-
+
 Scan
 setAllowPartialResults(booleanallowPartialResults)
 Setting whether the caller wants to see the partial results 
when server returns
  less-than-expected cells.
 
 
-
+
 Scan
 setAsyncPrefetch(booleanasyncPrefetch)
 
-
+
 Scan
 setAttribute(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname,
 byte[]value)
 Sets an attribute.
 
 
-
+
 Scan
 setAuthorizations(Authorizationsauthorizations)
 Sets the authorizations to be used by this Query
 
 
-
+
 Scan
 setBatch(intbatch)
 Set the maximum number of cells to return for each call to 
next().
 
 
-
+
 Scan
 setCacheBlocks(booleancacheBlocks)
 Set whether blocks should be cached for this Scan.
 
 
-
+
 Scan
 setCaching(intcaching)
 Set the number of rows for caching that will be passed to 
scanners.
 
 
-
+
 Scan
 setColumnFamilyTimeRange(byte[]cf,
 longminStamp,
@@ -663,145 +675,151 @@ extends 
 
 
-
+
 Scan
 setColumnFamilyTimeRange(byte[]cf,
 TimeRangetr)
 
-
+
 Scan
 setConsistency(Consistencyconsistency)
 Sets the consistency level for this operation
 
 
-
+
 Scan
 setFamilyMap(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]familyMap)
 Setting the familyMap
 
 
-
+
 Scan
 setFilter(Filterfilter)
 Apply the specified server-side filter when performing the 
Query.
 
 
-
+
 Scan
 setId(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringid)
 This method allows you to set an identifier on an 
operation.
 
 
-
+
 Scan
 setIsolationLevel(IsolationLevellevel)
 Set the isolation level for this query.
 
 
-
+
 Scan
 setLimit(intlimit)
 Set the limit of rows for this scan.
 
 
-
+
 Scan
 

[48/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/org/apache/hadoop/hbase/client/Query.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Query.html 
b/apidocs/org/apache/hadoop/hbase/client/Query.html
index 98c304b..a1cfe33 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Query.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Query.html
@@ -468,9 +468,9 @@ extends 
 setFilter
 publicQuerysetFilter(Filterfilter)
-Apply the specified server-side filter when performing the 
Query.
- Only Filter.filterKeyValue(org.apache.hadoop.hbase.Cell)
 is called AFTER all tests
- for ttl, column match, deletes and max versions have been run.
+Apply the specified server-side filter when performing the 
Query. Only
+ Filter.filterKeyValue(org.apache.hadoop.hbase.Cell)
 is called AFTER all tests for ttl,
+ column match, deletes and column family's max versions have been run.
 
 Parameters:
 filter - filter to run on the server

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/org/apache/hadoop/hbase/client/Scan.ReadType.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Scan.ReadType.html 
b/apidocs/org/apache/hadoop/hbase/client/Scan.ReadType.html
index c90f409..e8bfff2 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Scan.ReadType.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Scan.ReadType.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public static enum Scan.ReadType
+public static enum Scan.ReadType
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumScan.ReadType
 
 
@@ -214,7 +214,7 @@ the order they are declared.
 
 
 DEFAULT
-public static finalScan.ReadType DEFAULT
+public static finalScan.ReadType DEFAULT
 
 
 
@@ -223,7 +223,7 @@ the order they are declared.
 
 
 STREAM
-public static finalScan.ReadType STREAM
+public static finalScan.ReadType STREAM
 
 
 
@@ -232,7 +232,7 @@ the order they are declared.
 
 
 PREAD
-public static finalScan.ReadType PREAD
+public static finalScan.ReadType PREAD
 
 
 



[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
index fe83874..41f6202 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
@@ -1064,6 +1064,14 @@ extends 
 
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+TOTAL_ROW_ACTION_REQUEST_COUNT
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+TOTAL_ROW_ACTION_REQUEST_COUNT_DESC
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 UPDATES_BLOCKED_DESC
 
 
@@ -1701,13 +1709,39 @@ extends 
 
 
+
+
+
+
+
+TOTAL_ROW_ACTION_REQUEST_COUNT
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TOTAL_ROW_ACTION_REQUEST_COUNT
+
+See Also:
+Constant
 Field Values
+
+
+
+
+
+
+
+
+TOTAL_ROW_ACTION_REQUEST_COUNT_DESC
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC
+
+See Also:
+Constant
 Field Values
+
+
+
 
 
 
 
 
 READ_REQUEST_COUNT
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String READ_REQUEST_COUNT
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String READ_REQUEST_COUNT
 
 See Also:
 Constant
 Field Values
@@ -1720,7 +1754,7 @@ extends 
 
 READ_REQUEST_COUNT_DESC
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String READ_REQUEST_COUNT_DESC
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String READ_REQUEST_COUNT_DESC
 
 See Also:
 Constant
 Field Values
@@ -1733,7 +1767,7 @@ extends 
 
 FILTERED_READ_REQUEST_COUNT
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FILTERED_READ_REQUEST_COUNT
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FILTERED_READ_REQUEST_COUNT
 
 See Also:
 Constant
 Field Values
@@ -1746,7 +1780,7 @@ extends 
 
 FILTERED_READ_REQUEST_COUNT_DESC
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FILTERED_READ_REQUEST_COUNT_DESC
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FILTERED_READ_REQUEST_COUNT_DESC
 
 See Also:
 Constant
 Field Values
@@ -1759,7 +1793,7 @@ extends 
 
 WRITE_REQUEST_COUNT
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WRITE_REQUEST_COUNT
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WRITE_REQUEST_COUNT
 
 See Also:
 Constant
 Field Values
@@ -1772,7 +1806,7 @@ extends 
 
 WRITE_REQUEST_COUNT_DESC
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WRITE_REQUEST_COUNT_DESC
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WRITE_REQUEST_COUNT_DESC
 
 See Also:
 Constant
 Field Values
@@ -1785,7 +1819,7 @@ extends 
 
 CHECK_MUTATE_FAILED_COUNT
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CHECK_MUTATE_FAILED_COUNT
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CHECK_MUTATE_FAILED_COUNT
 
 See Also:
 Constant
 Field Values
@@ -1798,7 +1832,7 @@ extends 
 
 CHECK_MUTATE_FAILED_COUNT_DESC
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CHECK_MUTATE_FAILED_COUNT_DESC
+static 

[26/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index ed159f5..b75a6bd 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -3169,7 +3169,7 @@ implements 
 
 closeLock
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object closeLock
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object closeLock
 
 
 
@@ -3178,7 +3178,7 @@ implements 
 
 MEMSTORE_PERIODIC_FLUSH_INTERVAL
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MEMSTORE_PERIODIC_FLUSH_INTERVAL
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MEMSTORE_PERIODIC_FLUSH_INTERVAL
 Conf key for the periodic flush interval
 
 See Also:
@@ -3192,7 +3192,7 @@ implements 
 
 DEFAULT_CACHE_FLUSH_INTERVAL
-public static finalint DEFAULT_CACHE_FLUSH_INTERVAL
+public static finalint DEFAULT_CACHE_FLUSH_INTERVAL
 Default interval for the memstore flush
 
 See Also:
@@ -3206,7 +3206,7 @@ implements 
 
 SYSTEM_CACHE_FLUSH_INTERVAL
-public static finalint SYSTEM_CACHE_FLUSH_INTERVAL
+public static finalint SYSTEM_CACHE_FLUSH_INTERVAL
 Default interval for System tables memstore flush
 
 See Also:
@@ -3220,7 +3220,7 @@ implements 
 
 MEMSTORE_FLUSH_PER_CHANGES
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MEMSTORE_FLUSH_PER_CHANGES
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MEMSTORE_FLUSH_PER_CHANGES
 Conf key to force a flush if there are already enough 
changes for one region in memstore
 
 See Also:
@@ -3234,7 +3234,7 @@ implements 
 
 DEFAULT_FLUSH_PER_CHANGES
-public static finallong DEFAULT_FLUSH_PER_CHANGES
+public static finallong DEFAULT_FLUSH_PER_CHANGES
 
 See Also:
 Constant
 Field Values
@@ -3247,7 +3247,7 @@ implements 
 
 MAX_FLUSH_PER_CHANGES
-public static finallong MAX_FLUSH_PER_CHANGES
+public static finallong MAX_FLUSH_PER_CHANGES
 The following MAX_FLUSH_PER_CHANGES is large enough because 
each KeyValue has 20+ bytes
  overhead. Therefore, even 1G empty KVs occupy at least 20GB memstore size for 
a single region
 
@@ -3262,7 +3262,7 @@ implements 
 
 FOR_UNIT_TESTS_ONLY
-private static finalbyte[] FOR_UNIT_TESTS_ONLY
+private static finalbyte[] FOR_UNIT_TESTS_ONLY
 Row needed by below method.
 
 
@@ -3272,7 +3272,7 @@ implements 
 
 FIXED_OVERHEAD
-public static finallong FIXED_OVERHEAD
+public static finallong FIXED_OVERHEAD
 
 
 
@@ -3281,7 +3281,7 @@ implements 
 
 DEEP_OVERHEAD
-public static finallong DEEP_OVERHEAD
+public static finallong DEEP_OVERHEAD
 
 
 
@@ -3290,7 +3290,7 @@ implements 
 
 MOCKED_LIST
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCell MOCKED_LIST
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCell MOCKED_LIST
 A mocked list implementation - discards all updates.
 
 
@@ -3461,7 +3461,7 @@ publiclong
 
 initializeStores
-privatelonginitializeStores(CancelableProgressablereporter,
+privatelonginitializeStores(CancelableProgressablereporter,
   MonitoredTaskstatus)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Open all Stores.
@@ -3482,7 +3482,7 @@ publiclong
 
 initializeWarmup
-privatevoidinitializeWarmup(CancelableProgressablereporter)
+privatevoidinitializeWarmup(CancelableProgressablereporter)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -3496,7 +3496,7 @@ publiclong
 
 getStoreFiles
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.fs.PathgetStoreFiles()
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 

[09/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/TableBuilder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableBuilder.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableBuilder.html
index 8c639fc..5188ab8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableBuilder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableBuilder.html
@@ -65,16 +65,10 @@
 057  TableBuilder setWriteRpcTimeout(int 
timeout);
 058
 059  /**
-060   * Set the write buffer size which by 
default is specified by the
-061   * {@code hbase.client.write.buffer} 
setting.
-062   */
-063  TableBuilder setWriteBufferSize(long 
writeBufferSize);
-064
-065  /**
-066   * Create the {@link Table} instance.
-067   */
-068  Table build();
-069}
+060   * Create the {@link Table} instance.
+061   */
+062  Table build();
+063}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/TableBuilderBase.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableBuilderBase.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableBuilderBase.html
index 0487e60..c9e2c98 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableBuilderBase.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableBuilderBase.html
@@ -44,51 +44,42 @@
 036
 037  protected int writeRpcTimeout;
 038
-039  protected long writeBufferSize;
-040
-041  TableBuilderBase(TableName tableName, 
ConnectionConfiguration connConf) {
-042if (tableName == null) {
-043  throw new 
IllegalArgumentException("Given table name is null");
-044}
-045this.tableName = tableName;
-046this.operationTimeout = 
tableName.isSystemTable() ? connConf.getMetaOperationTimeout()
-047: 
connConf.getOperationTimeout();
-048this.rpcTimeout = 
connConf.getRpcTimeout();
-049this.readRpcTimeout = 
connConf.getReadRpcTimeout();
-050this.writeRpcTimeout = 
connConf.getWriteRpcTimeout();
-051this.writeBufferSize = 
connConf.getWriteBufferSize();
-052  }
-053
-054  @Override
-055  public TableBuilderBase 
setOperationTimeout(int timeout) {
-056this.operationTimeout = timeout;
-057return this;
-058  }
-059
-060  @Override
-061  public TableBuilderBase 
setRpcTimeout(int timeout) {
-062this.rpcTimeout = timeout;
-063return this;
-064  }
-065
-066  @Override
-067  public TableBuilderBase 
setReadRpcTimeout(int timeout) {
-068this.readRpcTimeout = timeout;
-069return this;
-070  }
-071
-072  @Override
-073  public TableBuilderBase 
setWriteRpcTimeout(int timeout) {
-074this.writeRpcTimeout = timeout;
-075return this;
-076  }
-077
-078  @Override
-079  public TableBuilder 
setWriteBufferSize(long writeBufferSize) {
-080this.writeBufferSize = 
writeBufferSize;
-081return this;
-082  }
-083}
+039  TableBuilderBase(TableName tableName, 
ConnectionConfiguration connConf) {
+040if (tableName == null) {
+041  throw new 
IllegalArgumentException("Given table name is null");
+042}
+043this.tableName = tableName;
+044this.operationTimeout = 
tableName.isSystemTable() ? connConf.getMetaOperationTimeout()
+045: 
connConf.getOperationTimeout();
+046this.rpcTimeout = 
connConf.getRpcTimeout();
+047this.readRpcTimeout = 
connConf.getReadRpcTimeout();
+048this.writeRpcTimeout = 
connConf.getWriteRpcTimeout();
+049  }
+050
+051  @Override
+052  public TableBuilderBase 
setOperationTimeout(int timeout) {
+053this.operationTimeout = timeout;
+054return this;
+055  }
+056
+057  @Override
+058  public TableBuilderBase 
setRpcTimeout(int timeout) {
+059this.rpcTimeout = timeout;
+060return this;
+061  }
+062
+063  @Override
+064  public TableBuilderBase 
setReadRpcTimeout(int timeout) {
+065this.readRpcTimeout = timeout;
+066return this;
+067  }
+068
+069  @Override
+070  public TableBuilderBase 
setWriteRpcTimeout(int timeout) {
+071this.writeRpcTimeout = timeout;
+072return this;
+073  }
+074}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index d7f2ea1..128ae24 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
@@ -1015,7 +1015,7 @@
 1007  @Override
 1008  public 

[50/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index 2d6b225..69ef0ba 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,8 +5,8 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20170810220854+00'00')
-/CreationDate (D:20170810220854+00'00')
+/ModDate (D:20170811144515+00'00')
+/CreationDate (D:20170811144515+00'00')
 >>
 endobj
 2 0 obj
@@ -27334,7 +27334,7 @@ endobj
 endobj
 136 0 obj
 << /Limits [(__anchor-top) (adding.new.node)]
-/Names [(__anchor-top) 25 0 R (__indexterm-6955512) 3262 0 R 
(__indexterm-6957762) 3264 0 R (__indexterm-6959824) 3265 0 R 
(__indexterm-6961698) 3266 0 R (acid) 891 0 R 
(add-metric-name-and-function-to-hadoop-compat-interface) 3361 0 R 
(add-the-implementation-to-both-hadoop-1-and-hadoop-2-compat-modules) 3362 0 R 
(add.metrics) 3359 0 R (adding-a-new-chapter-to-the-hbase-reference-guide) 3597 
0 R (adding.new.node) 2859 0 R]
+/Names [(__anchor-top) 25 0 R (__indexterm-6955580) 3262 0 R 
(__indexterm-6957830) 3264 0 R (__indexterm-6959892) 3265 0 R 
(__indexterm-6961766) 3266 0 R (acid) 891 0 R 
(add-metric-name-and-function-to-hadoop-compat-interface) 3361 0 R 
(add-the-implementation-to-both-hadoop-1-and-hadoop-2-compat-modules) 3362 0 R 
(add.metrics) 3359 0 R (adding-a-new-chapter-to-the-hbase-reference-guide) 3597 
0 R (adding.new.node) 2859 0 R]
 >>
 endobj
 137 0 obj
@@ -36783,7 +36783,7 @@ endobj
 >>
 endobj
 200 0 obj
-<< /Length 8136
+<< /Length 8165
 >>
 stream
 q
@@ -37004,12 +37004,12 @@ ET
 0.2 0.2 0.2 scn
 0.2 0.2 0.2 SCN
 
-1.4629 Tw
+4.5524 Tw
 
 BT
 63.24 513.153 Td
 /F1.0 10.5 Tf
-[<68626173652e7a6f6f6b> 20.0195 
<65657065722e636c69656e74506f727420636f6e6669672e20616e64207061737320697420696e746f207a6f6f6b>
 20.0195 
<656570657220636f6e7374727563746f722061732074686520636f6e6e656374537472696e67>] 
TJ
+[<68626173652e7a6f6f6b> 20.0195 <65657065722e70726f7065727479> 89.8438 
<2e636c69656e74506f727420636f6e6669672e20616e64207061737320697420696e746f207a6f6f6b>
 20.0195 <656570657220636f6e7374727563746f7220617320746865>] TJ
 ET
 
 
@@ -37022,7 +37022,7 @@ ET
 BT
 63.24 497.373 Td
 /F1.0 10.5 Tf
-[<706172> 20.0195 <616d657465722e>] TJ
+[<636f6e6e656374537472696e6720706172> 20.0195 <616d657465722e>] TJ
 ET
 
 0.0 0.0 0.0 SCN
@@ -755094,4381 +755094,4381 @@ xref
 582095 0 n 
 582141 0 n 
 582745 0 n 
-590935 0 n 
-591294 0 n 
-591341 0 n 
-591388 0 n 
-591435 0 n 
-591482 0 n 
-591527 0 n 
-599540 0 n 
-599899 0 n 
-599946 0 n 
-53 0 n 
-600040 0 n 
-600087 0 n 
-600134 0 n 
-600180 0 n 
-600933 0 n 
-608382 0 n 
-608741 0 n 
-608788 0 n 
-608835 0 n 
-608882 0 n 
-608929 0 n 
-608976 0 n 
-609023 0 n 
-619353 0 n 
-619712 0 n 
-619759 0 n 
-619806 0 n 
-619853 0 n 
-619899 0 n 
-628410 0 n 
-628769 0 n 
-628816 0 n 
-628863 0 n 
-628910 0 n 
-629568 0 n 
-629615 0 n 
-629662 0 n 
-639439 0 n 
-639824 0 n 
-639872 0 n 
-639920 0 n 
-639968 0 n 
-640016 0 n 
-640064 0 n 
-640299 0 n 
-640533 0 n 
-648407 0 n 
-648808 0 n 
-648855 0 n 
-648902 0 n 
-648949 0 n 
-648996 0 n 
-649043 0 n 
-649285 0 n 
-649526 0 n 
-649573 0 n 
-649817 0 n 
-650060 0 n 
-650106 0 n 
-658119 0 n 
-658491 0 n 
-658538 0 n 
-658585 0 n 
-659127 0 n 
-659174 0 n 
-659221 0 n 
-659267 0 n 
-659313 0 n 
-667568 0 n 
-667927 0 n 
-667974 0 n 
-668021 0 n 
-668068 0 n 
-668115 0 n 
-668160 0 n 
-678611 0 n 
-678996 0 n 
-679043 0 n 
-679608 0 n 
-679803 0 n 
-679996 0 n 
-680043 0 n 
-680090 0 n 
-680137 0 n 
-687376 0 n 
-687735 0 n 
-687782 0 n 
-687829 0 n 
-687876 0 n 
-687923 0 n 
-687970 0 n 
-688016 0 n 
-688063 0 n 
-696982 0 n 
-697341 0 n 
-697388 0 n 
-697435 0 n 
-697482 0 n 
-697529 0 n 
-697576 0 n 
-706548 0 n 
-706907 0 n 
-706953 0 n 
-707000 0 n 
-707047 0 n 
-707094 0 n 
-707141 0 n 
-716019 0 n 
-716378 0 n 
-716424 0 n 
-717066 0 n 

[45/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
index 438db17..48420d2 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Get.html
@@ -275,297 +275,321 @@
 267  /**
 268   * Get all available versions.
 269   * @return this for invocation 
chaining
-270   */
-271  public Get setMaxVersions() {
-272this.maxVersions = 
Integer.MAX_VALUE;
-273return this;
-274  }
-275
-276  /**
-277   * Get up to the specified number of 
versions of each column.
-278   * @param maxVersions maximum versions 
for each column
-279   * @throws IOException if invalid 
number of versions
-280   * @return this for invocation 
chaining
-281   */
-282  public Get setMaxVersions(int 
maxVersions) throws IOException {
-283if(maxVersions = 0) {
-284  throw new IOException("maxVersions 
must be positive");
-285}
-286this.maxVersions = maxVersions;
-287return this;
-288  }
-289
-290  public Get 
setLoadColumnFamiliesOnDemand(boolean value) {
-291return (Get) 
super.setLoadColumnFamiliesOnDemand(value);
-292  }
-293
-294  /**
-295   * Set the maximum number of values to 
return per row per Column Family
-296   * @param limit the maximum number of 
values returned / row / CF
-297   * @return this for invocation 
chaining
-298   */
-299  public Get 
setMaxResultsPerColumnFamily(int limit) {
-300this.storeLimit = limit;
-301return this;
-302  }
-303
-304  /**
-305   * Set offset for the row per Column 
Family. This offset is only within a particular row/CF
-306   * combination. It gets reset back to 
zero when we move to the next row or CF.
-307   * @param offset is the number of kvs 
that will be skipped.
-308   * @return this for invocation 
chaining
-309   */
-310  public Get 
setRowOffsetPerColumnFamily(int offset) {
-311this.storeOffset = offset;
-312return this;
-313  }
-314
-315  @Override
-316  public Get setFilter(Filter filter) {
-317super.setFilter(filter);
-318return this;
-319  }
-320
-321  /* Accessors */
-322
-323  /**
-324   * Set whether blocks should be cached 
for this Get.
-325   * p
-326   * This is true by default.  When true, 
default settings of the table and
-327   * family are used (this will never 
override caching blocks if the block
-328   * cache is disabled for that family or 
entirely).
-329   *
-330   * @param cacheBlocks if false, default 
settings are overridden and blocks
-331   * will not be cached
-332   */
-333  public Get setCacheBlocks(boolean 
cacheBlocks) {
-334this.cacheBlocks = cacheBlocks;
-335return this;
-336  }
-337
-338  /**
-339   * Get whether blocks should be cached 
for this Get.
-340   * @return true if default caching 
should be used, false if blocks should not
-341   * be cached
-342   */
-343  public boolean getCacheBlocks() {
-344return cacheBlocks;
-345  }
+270   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+271   * {@link 
#readAllVersions()} instead.
+272   */
+273  @Deprecated
+274  public Get setMaxVersions() {
+275return readAllVersions();
+276  }
+277
+278  /**
+279   * Get up to the specified number of 
versions of each column.
+280   * @param maxVersions maximum versions 
for each column
+281   * @throws IOException if invalid 
number of versions
+282   * @return this for invocation 
chaining
+283   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+284   * {@link 
#readVersions(int)} instead.
+285   */
+286  @Deprecated
+287  public Get setMaxVersions(int 
maxVersions) throws IOException {
+288return readVersions(maxVersions);
+289  }
+290
+291  /**
+292   * Get all available versions.
+293   * @return this for invocation 
chaining
+294   */
+295  public Get readAllVersions() {
+296this.maxVersions = 
Integer.MAX_VALUE;
+297return this;
+298  }
+299
+300  /**
+301   * Get up to the specified number of 
versions of each column.
+302   * @param versions specified number of 
versions for each column
+303   * @throws IOException if invalid 
number of versions
+304   * @return this for invocation 
chaining
+305   */
+306  public Get readVersions(int versions) 
throws IOException {
+307if (versions = 0) {
+308  throw new IOException("versions 
must be positive");
+309}
+310this.maxVersions = versions;
+311return this;
+312  }
+313
+314  public Get 
setLoadColumnFamiliesOnDemand(boolean value) {
+315return (Get) 
super.setLoadColumnFamiliesOnDemand(value);
+316  }
+317
+318  /**
+319   * Set the maximum number of values to 
return per row per Column Family
+320   * @param limit the maximum number of 
values returned / row / CF
+321   * 

[11/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Scan.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
index 8f0943d..63ba3f6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
@@ -601,634 +601,657 @@
 593  /**
 594   * Get all available versions.
 595   * @return this
-596   */
-597  public Scan setMaxVersions() {
-598this.maxVersions = 
Integer.MAX_VALUE;
-599return this;
-600  }
-601
-602  /**
-603   * Get up to the specified number of 
versions of each column.
-604   * @param maxVersions maximum versions 
for each column
-605   * @return this
-606   */
-607  public Scan setMaxVersions(int 
maxVersions) {
-608this.maxVersions = maxVersions;
-609return this;
-610  }
-611
-612  /**
-613   * Set the maximum number of cells to 
return for each call to next(). Callers should be aware
-614   * that this is not equivalent to 
calling {@link #setAllowPartialResults(boolean)}.
-615   * If you don't allow partial results, 
the number of cells in each Result must equal to your
-616   * batch setting unless it is the last 
Result for current row. So this method is helpful in paging
-617   * queries. If you just want to prevent 
OOM at client, use setAllowPartialResults(true) is better.
-618   * @param batch the maximum number of 
values
-619   * @see 
Result#mayHaveMoreCellsInRow()
-620   */
-621  public Scan setBatch(int batch) {
-622if (this.hasFilter()  
this.filter.hasFilterRow()) {
-623  throw new 
IncompatibleFilterException(
-624"Cannot set batch on a scan using 
a filter" +
-625" that returns true for 
filter.hasFilterRow");
-626}
-627this.batch = batch;
-628return this;
-629  }
-630
-631  /**
-632   * Set the maximum number of values to 
return per row per Column Family
-633   * @param limit the maximum number of 
values returned / row / CF
-634   */
-635  public Scan 
setMaxResultsPerColumnFamily(int limit) {
-636this.storeLimit = limit;
-637return this;
-638  }
-639
-640  /**
-641   * Set offset for the row per Column 
Family.
-642   * @param offset is the number of kvs 
that will be skipped.
+596   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+597   * {@link 
#readAllVersions()} instead.
+598   */
+599  @Deprecated
+600  public Scan setMaxVersions() {
+601return readAllVersions();
+602  }
+603
+604  /**
+605   * Get up to the specified number of 
versions of each column.
+606   * @param maxVersions maximum versions 
for each column
+607   * @return this
+608   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+609   * {@link 
#readVersions(int)} instead.
+610   */
+611  @Deprecated
+612  public Scan setMaxVersions(int 
maxVersions) {
+613return readVersions(maxVersions);
+614  }
+615
+616  /**
+617   * Get all available versions.
+618   * @return this
+619   */
+620  public Scan readAllVersions() {
+621this.maxVersions = 
Integer.MAX_VALUE;
+622return this;
+623  }
+624
+625  /**
+626   * Get up to the specified number of 
versions of each column.
+627   * @param versions specified number of 
versions for each column
+628   * @return this
+629   */
+630  public Scan readVersions(int versions) 
{
+631this.maxVersions = versions;
+632return this;
+633  }
+634
+635  /**
+636   * Set the maximum number of cells to 
return for each call to next(). Callers should be aware
+637   * that this is not equivalent to 
calling {@link #setAllowPartialResults(boolean)}.
+638   * If you don't allow partial results, 
the number of cells in each Result must equal to your
+639   * batch setting unless it is the last 
Result for current row. So this method is helpful in paging
+640   * queries. If you just want to prevent 
OOM at client, use setAllowPartialResults(true) is better.
+641   * @param batch the maximum number of 
values
+642   * @see 
Result#mayHaveMoreCellsInRow()
 643   */
-644  public Scan 
setRowOffsetPerColumnFamily(int offset) {
-645this.storeOffset = offset;
-646return this;
-647  }
-648
-649  /**
-650   * Set the number of rows for caching 
that will be passed to scanners.
-651   * If not set, the Configuration 
setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
-652   * apply.
-653   * Higher caching values will enable 
faster scanners but will use more memory.
-654   * @param caching the number of rows 
for caching
-655   */
-656  public Scan setCaching(int caching) {
-657this.caching = caching;
-658return this;
-659  }
-660
-661  /**
-662   * @return the maximum result size in 
bytes. See {@link #setMaxResultSize(long)}
-663   */
-664  public long getMaxResultSize() {
-665

[06/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
index 291256a..752a26f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
@@ -30,151 +30,151 @@
 022import java.io.IOException;
 023import java.io.InputStream;
 024import java.io.OutputStream;
-025import 
java.util.concurrent.atomic.AtomicBoolean;
-026
-027import org.apache.commons.logging.Log;
-028import 
org.apache.commons.logging.LogFactory;
-029import 
org.apache.hadoop.hbase.HConstants;
-030import 
org.apache.hadoop.hbase.HRegionInfo;
-031import 
org.apache.hadoop.hbase.NotServingRegionException;
-032import 
org.apache.hadoop.hbase.ServerName;
-033import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-034import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-035import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-036import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-037import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-038import 
org.apache.hadoop.hbase.master.procedure.ServerCrashException;
-039import 
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionCloseOperation;
-040import 
org.apache.hadoop.hbase.master.RegionState.State;
-041import 
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-042import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
-043import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-044import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
-045import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData;
-046import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-047import 
org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-048import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-049
+025import java.net.ConnectException;
+026import 
java.util.concurrent.atomic.AtomicBoolean;
+027
+028import org.apache.commons.logging.Log;
+029import 
org.apache.commons.logging.LogFactory;
+030import 
org.apache.hadoop.hbase.HConstants;
+031import 
org.apache.hadoop.hbase.HRegionInfo;
+032import 
org.apache.hadoop.hbase.NotServingRegionException;
+033import 
org.apache.hadoop.hbase.ServerName;
+034import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+035import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
+036import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+037import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
+038import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+039import 
org.apache.hadoop.hbase.master.procedure.ServerCrashException;
+040import 
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionCloseOperation;
+041import 
org.apache.hadoop.hbase.master.RegionState.State;
+042import 
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
+043import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
+044import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+045import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
+046import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData;
+047import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+048import 
org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
+049import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 050
-051/**
-052 * Procedure that describe the 
unassignment of a single region.
-053 * There can only be one 
RegionTransitionProcedure per region running at the time,
-054 * since each procedure takes a lock on 
the region.
-055 *
-056 * pThe Unassign starts by 
placing a "close region" request in the Remote Dispatcher
-057 * queue, and the procedure will then go 
into a "waiting state".
-058 * The Remote Dispatcher will batch the 
various requests for that server and
-059 * they will be sent to the RS for 
execution.
-060 * The RS will complete the open 
operation by calling master.reportRegionStateTransition().
-061 * The AM will intercept the transition 
report, and notify the procedure.
-062 * The procedure will finish the unassign 
by publishing its new state on meta
-063 * or it will retry the unassign.
-064 */

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
index 1a130a3..7e8fff8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
@@ -1761,7 +1761,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.h
 
 
 writeRegionInfoOnFilesystem
-privatevoidwriteRegionInfoOnFilesystem(booleanuseTempDir)
+privatevoidwriteRegionInfoOnFilesystem(booleanuseTempDir)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Write out an info file under the region directory. Useful 
recovering mangled regions.
 
@@ -1778,7 +1778,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.h
 
 
 writeRegionInfoOnFilesystem
-privatevoidwriteRegionInfoOnFilesystem(byte[]regionInfoContent,
+privatevoidwriteRegionInfoOnFilesystem(byte[]regionInfoContent,
  booleanuseTempDir)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Write out an info file under the region directory. Useful 
recovering mangled regions.
@@ -1797,7 +1797,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.h
 
 
 createRegionOnFileSystem
-public staticHRegionFileSystemcreateRegionOnFileSystem(org.apache.hadoop.conf.Configurationconf,
+public staticHRegionFileSystemcreateRegionOnFileSystem(org.apache.hadoop.conf.Configurationconf,
  
org.apache.hadoop.fs.FileSystemfs,
  
org.apache.hadoop.fs.PathtableDir,
  HRegionInforegionInfo)
@@ -1820,7 +1820,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.h
 
 
 openRegionFromFileSystem
-public staticHRegionFileSystemopenRegionFromFileSystem(org.apache.hadoop.conf.Configurationconf,
+public staticHRegionFileSystemopenRegionFromFileSystem(org.apache.hadoop.conf.Configurationconf,
  
org.apache.hadoop.fs.FileSystemfs,
  
org.apache.hadoop.fs.PathtableDir,
  HRegionInforegionInfo,
@@ -1845,7 +1845,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.h
 
 
 deleteRegionFromFileSystem
-public staticvoiddeleteRegionFromFileSystem(org.apache.hadoop.conf.Configurationconf,
+public staticvoiddeleteRegionFromFileSystem(org.apache.hadoop.conf.Configurationconf,
   
org.apache.hadoop.fs.FileSystemfs,
   
org.apache.hadoop.fs.PathtableDir,
   HRegionInforegionInfo)
@@ -1868,7 +1868,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.h
 
 
 createDir
-booleancreateDir(org.apache.hadoop.fs.Pathdir)
+booleancreateDir(org.apache.hadoop.fs.Pathdir)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Creates a directory. Assumes the user has already checked 
for this directory existence.
 
@@ -1888,7 +1888,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.h
 
 
 rename
-booleanrename(org.apache.hadoop.fs.Pathsrcpath,
+booleanrename(org.apache.hadoop.fs.Pathsrcpath,
org.apache.hadoop.fs.PathdstPath)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Renames a directory. Assumes the user has already checked 
for this directory existence.
@@ -1909,7 +1909,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.h
 
 
 deleteDir
-booleandeleteDir(org.apache.hadoop.fs.Pathdir)
+booleandeleteDir(org.apache.hadoop.fs.Pathdir)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Deletes a directory. Assumes the user has already checked 
for this directory existence.
 
@@ -1928,7 +1928,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.h
 
 
 sleepBeforeRetry
-privatevoidsleepBeforeRetry(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringmsg,

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c8e4f1987 -> 346adc371


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index a701ece..26ee60a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -896,7376 +896,7383 @@
 888}
 889
 890// Write HRI to a file in case we 
need to recover hbase:meta
-891status.setStatus("Writing region info 
on filesystem");
-892fs.checkRegionInfoOnFilesystem();
-893
-894// Initialize all the HStores
-895status.setStatus("Initializing all 
the Stores");
-896long maxSeqId = 
initializeStores(reporter, status);
-897this.mvcc.advanceTo(maxSeqId);
-898if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-899  ListStore stores = 
this.getStores();  // update the stores that we are replaying
-900  try {
-901for (Store store : stores) {
-902  ((HStore) 
store).startReplayingFromWAL();
-903}
-904// Recover any edits if 
available.
-905maxSeqId = Math.max(maxSeqId,
-906
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-907// Make sure mvcc is up to max.
-908this.mvcc.advanceTo(maxSeqId);
-909  } finally {
-910for (Store store : stores) {  
  // update the stores that we are done replaying
-911  
((HStore)store).stopReplayingFromWAL();
-912}
-913  }
-914
-915}
-916this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-917
-918
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-919this.writestate.flushRequested = 
false;
-920this.writestate.compacting.set(0);
+891// Only the primary replica should 
write .regioninfo
+892if 
(this.getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+893  status.setStatus("Writing region 
info on filesystem");
+894  fs.checkRegionInfoOnFilesystem();
+895} else {
+896  if (LOG.isDebugEnabled()) {
+897LOG.debug("Skipping creation of 
.regioninfo file for " + this.getRegionInfo());
+898  }
+899}
+900
+901// Initialize all the HStores
+902status.setStatus("Initializing all 
the Stores");
+903long maxSeqId = 
initializeStores(reporter, status);
+904this.mvcc.advanceTo(maxSeqId);
+905if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+906  ListStore stores = 
this.getStores();  // update the stores that we are replaying
+907  try {
+908for (Store store : stores) {
+909  ((HStore) 
store).startReplayingFromWAL();
+910}
+911// Recover any edits if 
available.
+912maxSeqId = Math.max(maxSeqId,
+913
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+914// Make sure mvcc is up to max.
+915this.mvcc.advanceTo(maxSeqId);
+916  } finally {
+917for (Store store : stores) {  
  // update the stores that we are done replaying
+918  
((HStore)store).stopReplayingFromWAL();
+919}
+920  }
 921
-922if (this.writestate.writesEnabled) 
{
-923  // Remove temporary data left over 
from old regions
-924  status.setStatus("Cleaning up 
temporary data from old regions");
-925  fs.cleanupTempDir();
-926}
-927
-928if (this.writestate.writesEnabled) 
{
-929  status.setStatus("Cleaning up 
detritus from prior splits");
-930  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-931  // these directories here on open.  
We may be opening a region that was
-932  // being split but we crashed in 
the middle of it all.
-933  fs.cleanupAnySplitDetritus();
-934  fs.cleanupMergesDir();
-935}
-936
-937// Initialize split policy
-938this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-939
-940// Initialize flush policy
-941this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-942
-943long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-944for (Store store: stores.values()) 
{
-945  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-946}
-947
-948// Use maximum of log sequenceid or 
that which was found in stores
-949// (particularly if no recovered 
edits, seqid will be -1).
-950long nextSeqid = maxSeqId;
-951
-952// In distributedLogReplay mode, we 
don't know the last change sequence number because region
-953// 

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
index 7e3fe59..6867e53 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
@@ -110,10 +110,11 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class TableStateManager
+public class TableStateManager
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 This is a helper class used to manage table states.
- States persisted in tableinfo and cached internally.
+ States persisted in tableinfo and cached internally.
+ TODO: Cache state. Cut down on meta looksups.
 
 
 
@@ -270,7 +271,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -279,7 +280,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 lock
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReadWriteLock.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">ReadWriteLock lock
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReadWriteLock.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">ReadWriteLock lock
 
 
 
@@ -288,7 +289,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 master
-private finalMasterServices master
+private finalMasterServices master
 
 
 
@@ -305,7 +306,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TableStateManager
-publicTableStateManager(MasterServicesmaster)
+publicTableStateManager(MasterServicesmaster)
 
 
 
@@ -322,7 +323,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setTableState
-publicvoidsetTableState(TableNametableName,
+publicvoidsetTableState(TableNametableName,
   TableState.StatenewState)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Set table state to provided.
@@ -342,7 +343,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setTableStateIfInStates
-publicTableState.StatesetTableStateIfInStates(TableNametableName,
+publicTableState.StatesetTableStateIfInStates(TableNametableName,
 TableState.StatenewState,
 TableState.State...states)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -366,7 +367,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setTableStateIfNotInStates
-publicbooleansetTableStateIfNotInStates(TableNametableName,
+publicbooleansetTableStateIfNotInStates(TableNametableName,
   TableState.StatenewState,
   TableState.State...states)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -388,7 +389,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isTableState
-publicbooleanisTableState(TableNametableName,
+publicbooleanisTableState(TableNametableName,
 TableState.State...states)
 
 
@@ -398,7 +399,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setDeletedTable
-publicvoidsetDeletedTable(TableNametableName)
+publicvoidsetDeletedTable(TableNametableName)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -412,7 +413,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isTablePresent
-publicbooleanisTablePresent(TableNametableName)
+publicbooleanisTablePresent(TableNametableName)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -426,7 +427,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTablesInStates
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or 

[43/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
index 8f0943d..63ba3f6 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
@@ -601,634 +601,657 @@
 593  /**
 594   * Get all available versions.
 595   * @return this
-596   */
-597  public Scan setMaxVersions() {
-598this.maxVersions = 
Integer.MAX_VALUE;
-599return this;
-600  }
-601
-602  /**
-603   * Get up to the specified number of 
versions of each column.
-604   * @param maxVersions maximum versions 
for each column
-605   * @return this
-606   */
-607  public Scan setMaxVersions(int 
maxVersions) {
-608this.maxVersions = maxVersions;
-609return this;
-610  }
-611
-612  /**
-613   * Set the maximum number of cells to 
return for each call to next(). Callers should be aware
-614   * that this is not equivalent to 
calling {@link #setAllowPartialResults(boolean)}.
-615   * If you don't allow partial results, 
the number of cells in each Result must equal to your
-616   * batch setting unless it is the last 
Result for current row. So this method is helpful in paging
-617   * queries. If you just want to prevent 
OOM at client, use setAllowPartialResults(true) is better.
-618   * @param batch the maximum number of 
values
-619   * @see 
Result#mayHaveMoreCellsInRow()
-620   */
-621  public Scan setBatch(int batch) {
-622if (this.hasFilter()  
this.filter.hasFilterRow()) {
-623  throw new 
IncompatibleFilterException(
-624"Cannot set batch on a scan using 
a filter" +
-625" that returns true for 
filter.hasFilterRow");
-626}
-627this.batch = batch;
-628return this;
-629  }
-630
-631  /**
-632   * Set the maximum number of values to 
return per row per Column Family
-633   * @param limit the maximum number of 
values returned / row / CF
-634   */
-635  public Scan 
setMaxResultsPerColumnFamily(int limit) {
-636this.storeLimit = limit;
-637return this;
-638  }
-639
-640  /**
-641   * Set offset for the row per Column 
Family.
-642   * @param offset is the number of kvs 
that will be skipped.
+596   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+597   * {@link 
#readAllVersions()} instead.
+598   */
+599  @Deprecated
+600  public Scan setMaxVersions() {
+601return readAllVersions();
+602  }
+603
+604  /**
+605   * Get up to the specified number of 
versions of each column.
+606   * @param maxVersions maximum versions 
for each column
+607   * @return this
+608   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+609   * {@link 
#readVersions(int)} instead.
+610   */
+611  @Deprecated
+612  public Scan setMaxVersions(int 
maxVersions) {
+613return readVersions(maxVersions);
+614  }
+615
+616  /**
+617   * Get all available versions.
+618   * @return this
+619   */
+620  public Scan readAllVersions() {
+621this.maxVersions = 
Integer.MAX_VALUE;
+622return this;
+623  }
+624
+625  /**
+626   * Get up to the specified number of 
versions of each column.
+627   * @param versions specified number of 
versions for each column
+628   * @return this
+629   */
+630  public Scan readVersions(int versions) 
{
+631this.maxVersions = versions;
+632return this;
+633  }
+634
+635  /**
+636   * Set the maximum number of cells to 
return for each call to next(). Callers should be aware
+637   * that this is not equivalent to 
calling {@link #setAllowPartialResults(boolean)}.
+638   * If you don't allow partial results, 
the number of cells in each Result must equal to your
+639   * batch setting unless it is the last 
Result for current row. So this method is helpful in paging
+640   * queries. If you just want to prevent 
OOM at client, use setAllowPartialResults(true) is better.
+641   * @param batch the maximum number of 
values
+642   * @see 
Result#mayHaveMoreCellsInRow()
 643   */
-644  public Scan 
setRowOffsetPerColumnFamily(int offset) {
-645this.storeOffset = offset;
-646return this;
-647  }
-648
-649  /**
-650   * Set the number of rows for caching 
that will be passed to scanners.
-651   * If not set, the Configuration 
setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
-652   * apply.
-653   * Higher caching values will enable 
faster scanners but will use more memory.
-654   * @param caching the number of rows 
for caching
-655   */
-656  public Scan setCaching(int caching) {
-657this.caching = caching;
-658return this;
-659  }
-660
-661  /**
-662   * @return the maximum result size in 
bytes. See {@link #setMaxResultSize(long)}
-663   */
-664  public long getMaxResultSize() {
-665return 

[51/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/346adc37
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/346adc37
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/346adc37

Branch: refs/heads/asf-site
Commit: 346adc371d7b7c2856ae87fadff5c646906f3e38
Parents: c8e4f19
Author: jenkins 
Authored: Fri Aug 11 15:05:42 2017 +
Committer: jenkins 
Committed: Fri Aug 11 15:05:42 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf|  8756 +-
 apidocs/deprecated-list.html|   123 +-
 apidocs/index-all.html  |58 +-
 apidocs/org/apache/hadoop/hbase/client/Get.html |   177 +-
 .../org/apache/hadoop/hbase/client/Query.html   | 6 +-
 .../hadoop/hbase/client/Scan.ReadType.html  | 8 +-
 .../org/apache/hadoop/hbase/client/Scan.html|   270 +-
 .../org/apache/hadoop/hbase/client/Table.html   |   144 +-
 .../hadoop/hbase/client/TableBuilder.html   |22 +-
 .../hadoop/hbase/client/class-use/Get.html  |22 +-
 .../hadoop/hbase/client/class-use/Scan.html |22 +-
 .../hbase/client/class-use/TableBuilder.html| 7 -
 .../hadoop/hbase/rest/client/RemoteHTable.html  |   110 +-
 .../org/apache/hadoop/hbase/client/Get.html |   586 +-
 .../org/apache/hadoop/hbase/client/Query.html   | 6 +-
 .../hadoop/hbase/client/Scan.ReadType.html  |  1223 +-
 .../org/apache/hadoop/hbase/client/Scan.html|  1223 +-
 .../org/apache/hadoop/hbase/client/Table.html   |   869 +-
 .../hadoop/hbase/client/TableBuilder.html   |14 +-
 .../hadoop/hbase/rest/client/RemoteHTable.html  |   130 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 11986 +++---
 checkstyle.rss  |18 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |20 +-
 devapidocs/deprecated-list.html |   301 +-
 devapidocs/index-all.html   |   122 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |32 +-
 .../class-use/InterfaceAudience.Private.html| 2 +-
 .../hbase/classification/package-tree.html  | 6 +-
 .../BufferedMutatorImpl.QueueRowAccess.html |16 +-
 .../hbase/client/BufferedMutatorImpl.html   |52 +-
 .../org/apache/hadoop/hbase/client/Get.html |   177 +-
 .../org/apache/hadoop/hbase/client/HTable.html  |   380 +-
 .../hadoop/hbase/client/HTableWrapper.html  |   110 +-
 .../org/apache/hadoop/hbase/client/Query.html   | 6 +-
 .../hadoop/hbase/client/Scan.ReadType.html  | 8 +-
 .../org/apache/hadoop/hbase/client/Scan.html|   280 +-
 .../org/apache/hadoop/hbase/client/Table.html   |   144 +-
 .../hadoop/hbase/client/TableBuilder.html   |22 +-
 .../hadoop/hbase/client/TableBuilderBase.html   |52 +-
 .../hbase/client/class-use/BufferedMutator.html |10 +-
 .../client/class-use/BufferedMutatorImpl.html   |58 +-
 .../client/class-use/ClusterConnection.html |32 +-
 .../hadoop/hbase/client/class-use/Get.html  |22 +-
 .../RetriesExhaustedWithDetailsException.html   | 8 -
 .../hadoop/hbase/client/class-use/Scan.html |22 +-
 .../hbase/client/class-use/TableBuilder.html|11 -
 .../hadoop/hbase/client/package-tree.html   |26 +-
 .../apache/hadoop/hbase/client/package-use.html |   271 +-
 .../filter/class-use/Filter.ReturnCode.html |26 +-
 .../hadoop/hbase/filter/package-tree.html   |10 +-
 .../apache/hadoop/hbase/filter/package-use.html | 5 +
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 4 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 2 +-
 .../hadoop/hbase/master/TableStateManager.html  |37 +-
 .../master/assignment/AssignProcedure.html  |56 +-
 .../assignment/RegionTransitionProcedure.html   |   104 +-
 .../master/assignment/UnassignProcedure.html|89 +-
 .../class-use/RegionStates.RegionStateNode.html | 6 +-
 .../class-use/RegionTransitionProcedure.html| 2 +-
 .../master/assignment/package-summary.html  | 2 +-
 .../hbase/master/assignment/package-use.html| 2 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index a701ece..26ee60a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -896,7376 +896,7383 @@
 888}
 889
 890// Write HRI to a file in case we 
need to recover hbase:meta
-891status.setStatus("Writing region info 
on filesystem");
-892fs.checkRegionInfoOnFilesystem();
-893
-894// Initialize all the HStores
-895status.setStatus("Initializing all 
the Stores");
-896long maxSeqId = 
initializeStores(reporter, status);
-897this.mvcc.advanceTo(maxSeqId);
-898if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-899  ListStore stores = 
this.getStores();  // update the stores that we are replaying
-900  try {
-901for (Store store : stores) {
-902  ((HStore) 
store).startReplayingFromWAL();
-903}
-904// Recover any edits if 
available.
-905maxSeqId = Math.max(maxSeqId,
-906
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-907// Make sure mvcc is up to max.
-908this.mvcc.advanceTo(maxSeqId);
-909  } finally {
-910for (Store store : stores) {  
  // update the stores that we are done replaying
-911  
((HStore)store).stopReplayingFromWAL();
-912}
-913  }
-914
-915}
-916this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-917
-918
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-919this.writestate.flushRequested = 
false;
-920this.writestate.compacting.set(0);
+891// Only the primary replica should 
write .regioninfo
+892if 
(this.getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+893  status.setStatus("Writing region 
info on filesystem");
+894  fs.checkRegionInfoOnFilesystem();
+895} else {
+896  if (LOG.isDebugEnabled()) {
+897LOG.debug("Skipping creation of 
.regioninfo file for " + this.getRegionInfo());
+898  }
+899}
+900
+901// Initialize all the HStores
+902status.setStatus("Initializing all 
the Stores");
+903long maxSeqId = 
initializeStores(reporter, status);
+904this.mvcc.advanceTo(maxSeqId);
+905if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+906  ListStore stores = 
this.getStores();  // update the stores that we are replaying
+907  try {
+908for (Store store : stores) {
+909  ((HStore) 
store).startReplayingFromWAL();
+910}
+911// Recover any edits if 
available.
+912maxSeqId = Math.max(maxSeqId,
+913
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+914// Make sure mvcc is up to max.
+915this.mvcc.advanceTo(maxSeqId);
+916  } finally {
+917for (Store store : stores) {  
  // update the stores that we are done replaying
+918  
((HStore)store).stopReplayingFromWAL();
+919}
+920  }
 921
-922if (this.writestate.writesEnabled) 
{
-923  // Remove temporary data left over 
from old regions
-924  status.setStatus("Cleaning up 
temporary data from old regions");
-925  fs.cleanupTempDir();
-926}
-927
-928if (this.writestate.writesEnabled) 
{
-929  status.setStatus("Cleaning up 
detritus from prior splits");
-930  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-931  // these directories here on open.  
We may be opening a region that was
-932  // being split but we crashed in 
the middle of it all.
-933  fs.cleanupAnySplitDetritus();
-934  fs.cleanupMergesDir();
-935}
-936
-937// Initialize split policy
-938this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-939
-940// Initialize flush policy
-941this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-942
-943long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-944for (Store store: stores.values()) 
{
-945  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-946}
-947
-948// Use maximum of log sequenceid or 
that which was found in stores
-949// (particularly if no recovered 
edits, seqid will be -1).
-950long nextSeqid = maxSeqId;
-951
-952// In distributedLogReplay mode, we 
don't know the last change sequence number because region
-953// is opened before recovery 
completes. So we add a safety bumper to avoid new sequence number
-954// overlaps used sequence numbers

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
index 8e52a62..0ae39da 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -902,56 +902,60 @@ implements 
 long
+getTotalRowActionRequestCount()
+
+
+long
 getTotalStaticBloomSize()
 Get the size (in bytes) of the static bloom filters.
 
 
-
+
 long
 getTotalStaticIndexSize()
 Get the size (in bytes) of of the static indexes including 
the roots.
 
 
-
+
 long
 getTrailerHitCount()
 
-
+
 long
 getTrailerMissCount()
 
-
+
 long
 getUpdatesBlockedTime()
 Get the amount of time that updates were blocked.
 
 
-
+
 long
 getWALFileSize()
 Get the size of WAL files of this region server.
 
 
-
+
 long
 getWriteRequestsCount()
 Get the number of write requests to regions hosted on this 
region server.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 getZookeeperQuorum()
 Get the ZooKeeper Quorum Info
 
 
-
+
 private void
 initBlockCache()
 It's possible that due to threading the block cache could 
not be initialized
  yet (testing multiple region servers in one jvm).
 
 
-
+
 private void
 initMobFileCache()
 Initializes the mob file cache.
@@ -1635,13 +1639,26 @@ implements 
+
+
+
+
+getTotalRowActionRequestCount
+publiclonggetTotalRowActionRequestCount()
+
+Specified by:
+getTotalRowActionRequestCountin
 interfaceMetricsRegionServerWrapper
+
+
+
 
 
 
 
 
 getSplitQueueSize
-publicintgetSplitQueueSize()
+publicintgetSplitQueueSize()
 Description copied from 
interface:MetricsRegionServerWrapper
 Get the size of the split queue
 
@@ -1656,7 +1673,7 @@ implements 
 
 getCompactionQueueSize
-publicintgetCompactionQueueSize()
+publicintgetCompactionQueueSize()
 Description copied from 
interface:MetricsRegionServerWrapper
 Get the size of the compaction queue
 
@@ -1671,7 +1688,7 @@ implements 
 
 getSmallCompactionQueueSize
-publicintgetSmallCompactionQueueSize()
+publicintgetSmallCompactionQueueSize()
 
 Specified by:
 getSmallCompactionQueueSizein
 interfaceMetricsRegionServerWrapper
@@ -1684,7 +1701,7 @@ implements 
 
 getLargeCompactionQueueSize
-publicintgetLargeCompactionQueueSize()
+publicintgetLargeCompactionQueueSize()
 
 Specified by:
 getLargeCompactionQueueSizein
 interfaceMetricsRegionServerWrapper
@@ -1697,7 +1714,7 @@ implements 
 
 getFlushQueueSize
-publicintgetFlushQueueSize()
+publicintgetFlushQueueSize()
 Description copied from 
interface:MetricsRegionServerWrapper
 Get the 

[02/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
index a701ece..26ee60a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
@@ -896,7376 +896,7383 @@
 888}
 889
 890// Write HRI to a file in case we 
need to recover hbase:meta
-891status.setStatus("Writing region info 
on filesystem");
-892fs.checkRegionInfoOnFilesystem();
-893
-894// Initialize all the HStores
-895status.setStatus("Initializing all 
the Stores");
-896long maxSeqId = 
initializeStores(reporter, status);
-897this.mvcc.advanceTo(maxSeqId);
-898if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-899  ListStore stores = 
this.getStores();  // update the stores that we are replaying
-900  try {
-901for (Store store : stores) {
-902  ((HStore) 
store).startReplayingFromWAL();
-903}
-904// Recover any edits if 
available.
-905maxSeqId = Math.max(maxSeqId,
-906
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-907// Make sure mvcc is up to max.
-908this.mvcc.advanceTo(maxSeqId);
-909  } finally {
-910for (Store store : stores) {  
  // update the stores that we are done replaying
-911  
((HStore)store).stopReplayingFromWAL();
-912}
-913  }
-914
-915}
-916this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-917
-918
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-919this.writestate.flushRequested = 
false;
-920this.writestate.compacting.set(0);
+891// Only the primary replica should 
write .regioninfo
+892if 
(this.getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+893  status.setStatus("Writing region 
info on filesystem");
+894  fs.checkRegionInfoOnFilesystem();
+895} else {
+896  if (LOG.isDebugEnabled()) {
+897LOG.debug("Skipping creation of 
.regioninfo file for " + this.getRegionInfo());
+898  }
+899}
+900
+901// Initialize all the HStores
+902status.setStatus("Initializing all 
the Stores");
+903long maxSeqId = 
initializeStores(reporter, status);
+904this.mvcc.advanceTo(maxSeqId);
+905if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+906  ListStore stores = 
this.getStores();  // update the stores that we are replaying
+907  try {
+908for (Store store : stores) {
+909  ((HStore) 
store).startReplayingFromWAL();
+910}
+911// Recover any edits if 
available.
+912maxSeqId = Math.max(maxSeqId,
+913
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+914// Make sure mvcc is up to max.
+915this.mvcc.advanceTo(maxSeqId);
+916  } finally {
+917for (Store store : stores) {  
  // update the stores that we are done replaying
+918  
((HStore)store).stopReplayingFromWAL();
+919}
+920  }
 921
-922if (this.writestate.writesEnabled) 
{
-923  // Remove temporary data left over 
from old regions
-924  status.setStatus("Cleaning up 
temporary data from old regions");
-925  fs.cleanupTempDir();
-926}
-927
-928if (this.writestate.writesEnabled) 
{
-929  status.setStatus("Cleaning up 
detritus from prior splits");
-930  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-931  // these directories here on open.  
We may be opening a region that was
-932  // being split but we crashed in 
the middle of it all.
-933  fs.cleanupAnySplitDetritus();
-934  fs.cleanupMergesDir();
-935}
-936
-937// Initialize split policy
-938this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-939
-940// Initialize flush policy
-941this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-942
-943long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-944for (Store store: stores.values()) 
{
-945  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-946}
-947
-948// Use maximum of log sequenceid or 
that which was found in stores
-949// (particularly if no recovered 
edits, seqid will be -1).
-950long nextSeqid = maxSeqId;
-951
-952// In distributedLogReplay mode, we 
don't know the last change sequence number because region
-953// is opened before recovery 
completes. So we add a safety bumper to avoid new sequence number
-954// overlaps used sequence numbers
-955if 

[42/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
index 515b2b7..2e3444d 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
@@ -217,465 +217,436 @@
 209  /**
 210   * Puts some data in the table, in 
batch.
 211   * p
-212   * This can be used for group commit, 
or for submitting user defined
-213   * batches.  The writeBuffer will be 
periodically inspected while the List
-214   * is processed, so depending on the 
List size the writeBuffer may flush
-215   * not at all, or more than once.
-216   * @param puts The list of mutations to 
apply. The batch put is done by
-217   * aggregating the iteration of the 
Puts over the write buffer
-218   * at the client-side for a single RPC 
call.
-219   * @throws IOException if a remote or 
network exception occurs.
-220   * @since 0.20.0
-221   */
-222  void put(ListPut puts) throws 
IOException;
-223
-224  /**
-225   * Atomically checks if a 
row/family/qualifier value matches the expected
-226   * value. If it does, it adds the put.  
If the passed value is null, the check
-227   * is for the lack of column (ie: 
non-existance)
-228   *
-229   * @param row to check
-230   * @param family column family to 
check
-231   * @param qualifier column qualifier to 
check
-232   * @param value the expected value
-233   * @param put data to put if check 
succeeds
-234   * @throws IOException e
-235   * @return true if the new put was 
executed, false otherwise
-236   */
-237  boolean checkAndPut(byte[] row, byte[] 
family, byte[] qualifier,
-238byte[] value, Put put) throws 
IOException;
-239
-240  /**
-241   * Atomically checks if a 
row/family/qualifier value matches the expected
-242   * value. If it does, it adds the put.  
If the passed value is null, the check
-243   * is for the lack of column (ie: 
non-existence)
+212   * This can be used for group commit, 
or for submitting user defined batches.
+213   * @param puts The list of mutations to 
apply.
+214   * @throws IOException if a remote or 
network exception occurs.
+215   * @since 0.20.0
+216   */
+217  void put(ListPut puts) throws 
IOException;
+218
+219  /**
+220   * Atomically checks if a 
row/family/qualifier value matches the expected
+221   * value. If it does, it adds the put.  
If the passed value is null, the check
+222   * is for the lack of column (ie: 
non-existance)
+223   *
+224   * @param row to check
+225   * @param family column family to 
check
+226   * @param qualifier column qualifier to 
check
+227   * @param value the expected value
+228   * @param put data to put if check 
succeeds
+229   * @throws IOException e
+230   * @return true if the new put was 
executed, false otherwise
+231   */
+232  boolean checkAndPut(byte[] row, byte[] 
family, byte[] qualifier,
+233byte[] value, Put put) throws 
IOException;
+234
+235  /**
+236   * Atomically checks if a 
row/family/qualifier value matches the expected
+237   * value. If it does, it adds the put.  
If the passed value is null, the check
+238   * is for the lack of column (ie: 
non-existence)
+239   *
+240   * The expected value argument of this 
call is on the left and the current
+241   * value of the cell is on the right 
side of the comparison operator.
+242   *
+243   * Ie. eg. GREATER operator means 
expected value  existing = add the put.
 244   *
-245   * The expected value argument of this 
call is on the left and the current
-246   * value of the cell is on the right 
side of the comparison operator.
-247   *
-248   * Ie. eg. GREATER operator means 
expected value  existing = add the put.
-249   *
-250   * @param row to check
-251   * @param family column family to 
check
-252   * @param qualifier column qualifier to 
check
-253   * @param compareOp comparison operator 
to use
-254   * @param value the expected value
-255   * @param put data to put if check 
succeeds
-256   * @throws IOException e
-257   * @return true if the new put was 
executed, false otherwise
-258   */
-259  boolean checkAndPut(byte[] row, byte[] 
family, byte[] qualifier,
-260CompareFilter.CompareOp compareOp, 
byte[] value, Put put) throws IOException;
-261
-262  /**
-263   * Deletes the specified cells/row.
-264   *
-265   * @param delete The object that 
specifies what to delete.
-266   * @throws IOException if a remote or 
network exception occurs.
-267   * @since 0.20.0
-268   */
-269  void delete(Delete delete) throws 
IOException;
-270
-271  /**
-272   * Deletes the specified cells/rows in 
bulk.
-273   * @param deletes List of things to 
delete.  List gets modified by this
-274   * method (in particular it gets 
re-ordered, so the order in which the elements
-275   * are inserted 

hbase-site git commit: INFRA-10751 Empty commit

2017-08-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 346adc371 -> f322bf68e


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/f322bf68
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/f322bf68
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/f322bf68

Branch: refs/heads/asf-site
Commit: f322bf68e446ae539cfaa7cb7eb0611ebff04e22
Parents: 346adc3
Author: jenkins 
Authored: Fri Aug 11 15:06:21 2017 +
Committer: jenkins 
Committed: Fri Aug 11 15:06:21 2017 +

--

--




[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
index 904b921..3531f22 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
@@ -323,7 +323,7 @@
 315  @Override
 316  public void setMaxProcId(long 
maxProcId) {
 317assert lastProcId.get()  0 : 
"expected only one call to setMaxProcId()";
-318LOG.debug("Load maxProcId=" + 
maxProcId);
+318LOG.debug("Load max pid=" + 
maxProcId);
 319lastProcId.set(maxProcId);
 320  }
 321
@@ -735,7 +735,7 @@
 727   
!(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) 

 728   
nonceKeysToProcIdsMap.containsKey(nonceKey)) {
 729  if (traceEnabled) {
-730LOG.trace("Waiting for procId=" + 
oldProcId.longValue() + " to be submitted");
+730LOG.trace("Waiting for pid=" + 
oldProcId.longValue() + " to be submitted");
 731  }
 732  Threads.sleep(100);
 733}
@@ -1007,9 +1007,9 @@
 999  public void removeResult(final long 
procId) {
 1000CompletedProcedureRetainer retainer 
= completed.get(procId);
 1001if (retainer == null) {
-1002  assert 
!procedures.containsKey(procId) : "procId=" + procId + " is still running";
+1002  assert 
!procedures.containsKey(procId) : "pid=" + procId + " is still running";
 1003  if (LOG.isDebugEnabled()) {
-1004LOG.debug("procId=" + procId + " 
already removed by the cleaner.");
+1004LOG.debug("pid=" + procId + " 
already removed by the cleaner.");
 1005  }
 1006  return;
 1007}
@@ -1357,7 +1357,7 @@
 1349  return 
LockState.LOCK_YIELD_WAIT;
 1350} catch (Throwable e) {
 1351  // Catch NullPointerExceptions or 
similar errors...
-1352  LOG.fatal("CODE-BUG: Uncaught 
runtime exception fo " + proc, e);
+1352  LOG.fatal("CODE-BUG: Uncaught 
runtime exception for " + proc, e);
 1353}
 1354
 1355// allows to kill the executor 
before something is stored to the wal.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
index 904b921..3531f22 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
@@ -323,7 +323,7 @@
 315  @Override
 316  public void setMaxProcId(long 
maxProcId) {
 317assert lastProcId.get()  0 : 
"expected only one call to setMaxProcId()";
-318LOG.debug("Load maxProcId=" + 
maxProcId);
+318LOG.debug("Load max pid=" + 
maxProcId);
 319lastProcId.set(maxProcId);
 320  }
 321
@@ -735,7 +735,7 @@
 727   
!(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) 

 728   
nonceKeysToProcIdsMap.containsKey(nonceKey)) {
 729  if (traceEnabled) {
-730LOG.trace("Waiting for procId=" + 
oldProcId.longValue() + " to be submitted");
+730LOG.trace("Waiting for pid=" + 
oldProcId.longValue() + " to be submitted");
 731  }
 732  Threads.sleep(100);
 733}
@@ -1007,9 +1007,9 @@
 999  public void removeResult(final long 
procId) {
 1000CompletedProcedureRetainer retainer 
= completed.get(procId);
 1001if (retainer == null) {
-1002  assert 
!procedures.containsKey(procId) : "procId=" + procId + " is still running";
+1002  assert 
!procedures.containsKey(procId) : "pid=" + procId + " is still running";
 1003  if (LOG.isDebugEnabled()) {
-1004LOG.debug("procId=" + procId + " 
already removed by the cleaner.");
+1004LOG.debug("pid=" + procId + " 
already removed by the cleaner.");
 1005  }
 1006  return;
 1007}
@@ -1357,7 +1357,7 @@
 1349  return 
LockState.LOCK_YIELD_WAIT;
 1350} catch (Throwable e) {
 1351  // Catch NullPointerExceptions or 
similar errors...
-1352  LOG.fatal("CODE-BUG: Uncaught 
runtime exception fo " + proc, e);
+1352  LOG.fatal("CODE-BUG: Uncaught 
runtime exception for " + proc, e);
 1353}
 1354
 1355// allows to 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
index d8556be..d6a6b28 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
@@ -56,365 +56,371 @@
 048 * The AssignmentManager will notify this 
procedure when the RS completes
 049 * the operation and reports the 
transitioned state
 050 * (see the Assign and Unassign class for 
more detail).
-051 * pProcedures move from the 
REGION_TRANSITION_QUEUE state when they are
-052 * first submitted, to the 
REGION_TRANSITION_DISPATCH state when the request
-053 * to remote server is sent and the 
Procedure is suspended waiting on external
-054 * event to be woken again. Once the 
external event is triggered, Procedure
-055 * moves to the REGION_TRANSITION_FINISH 
state.
-056 *
-057 * pNOTE: {@link AssignProcedure} 
and {@link UnassignProcedure} should not be thought of
-058 * as being asymmetric, at least 
currently.
-059 * ul
-060 * li{@link AssignProcedure} 
moves through all the above described states and implements methods
-061 * associated with each while {@link 
UnassignProcedure} starts at state
-062 * REGION_TRANSITION_DISPATCH and state 
REGION_TRANSITION_QUEUE is not supported./li
-063 *
-064 * liWhen any step in {@link 
AssignProcedure} fails, failure handler
-065 * 
AssignProcedure#handleFailure(MasterProcedureEnv, RegionStateNode) re-attempts 
the
-066 * assignment by setting the procedure 
state to REGION_TRANSITION_QUEUE and forces
-067 * assignment to a different target 
server by setting {@link AssignProcedure#forceNewPlan}. When
-068 * the number of attempts reach hreshold 
configuration 'hbase.assignment.maximum.attempts',
-069 * the procedure is aborted. For {@link 
UnassignProcedure}, similar re-attempts are
-070 * intentionally not implemented. It is a 
'one shot' procedure.
-071 * /li
-072 * /ul
-073 *
-074 * pTODO: Considering it is a 
priority doing all we can to get make a region available as soon as possible,
-075 * re-attempting with any target makes 
sense if specified target fails in case of
-076 * {@link AssignProcedure}. For {@link 
UnassignProcedure}, if communication with RS fails,
-077 * similar re-attempt makes little sense 
(what should be different from previous attempt?). Also it
-078 * could be complex with current 
implementation of
-079 * {@link 
RegionTransitionProcedure#execute(MasterProcedureEnv)} and {@link 
UnassignProcedure}.
-080 * We have made a choice of keeping 
{@link UnassignProcedure} simple, where the procedure either
-081 * succeeds or fails depending on 
communication with RS. As parent will have broader context, parent
-082 * can better handle the failed instance 
of {@link UnassignProcedure}. Similar simplicity for
-083 * {@link AssignProcedure} is desired and 
should be explored/ discussed further.
-084 */
-085@InterfaceAudience.Private
-086public abstract class 
RegionTransitionProcedure
-087extends 
ProcedureMasterProcedureEnv
-088implements TableProcedureInterface,
-089  
RemoteProcedureMasterProcedureEnv, ServerName {
-090  private static final Log LOG = 
LogFactory.getLog(RegionTransitionProcedure.class);
-091
-092  protected final AtomicBoolean aborted = 
new AtomicBoolean(false);
-093
-094  private RegionTransitionState 
transitionState =
-095  
RegionTransitionState.REGION_TRANSITION_QUEUE;
-096  private HRegionInfo regionInfo;
-097  private volatile boolean lock = 
false;
-098
-099  public RegionTransitionProcedure() {
-100// Required by the Procedure 
framework to create the procedure on replay
-101super();
+051 *
+052 * pProcedures move from the 
REGION_TRANSITION_QUEUE state when they are
+053 * first submitted, to the 
REGION_TRANSITION_DISPATCH state when the request
+054 * to remote server is sent and the 
Procedure is suspended waiting on external
+055 * event to be woken again. Once the 
external event is triggered, Procedure
+056 * moves to the REGION_TRANSITION_FINISH 
state.
+057 *
+058 * pNOTE: {@link AssignProcedure} 
and {@link UnassignProcedure} should not be thought of
+059 * as being asymmetric, at least 
currently.
+060 * ul
+061 * li{@link AssignProcedure} 
moves through all the above described states and implements methods
+062 * associated with each while {@link 
UnassignProcedure} starts at state
+063 * REGION_TRANSITION_DISPATCH and state 
REGION_TRANSITION_QUEUE is not supported./li
+064 *
+065 * liWhen any step in {@link 
AssignProcedure} fails, failure handler
+066 * 
AssignProcedure#handleFailure(MasterProcedureEnv, 

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/class-use/TableBuilder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableBuilder.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableBuilder.html
index 5f469e7..02eb25f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableBuilder.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableBuilder.html
@@ -151,17 +151,6 @@
 
 
 TableBuilder
-TableBuilder.setWriteBufferSize(longwriteBufferSize)
-Set the write buffer size which by default is specified by 
the
- hbase.client.write.buffer setting.
-
-
-
-TableBuilder
-TableBuilderBase.setWriteBufferSize(longwriteBufferSize)
-
-
-TableBuilder
 TableBuilder.setWriteRpcTimeout(inttimeout)
 Set timeout for each write(put, delete) rpc request.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index aad311e..36bf254 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -538,25 +538,25 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.client.Consistency
-org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
-org.apache.hadoop.hbase.client.CompactType
 org.apache.hadoop.hbase.client.Durability
 org.apache.hadoop.hbase.client.SnapshotType
-org.apache.hadoop.hbase.client.RegionLocateType
-org.apache.hadoop.hbase.client.IsolationLevel
-org.apache.hadoop.hbase.client.Scan.ReadType
-org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
-org.apache.hadoop.hbase.client.CompactionState
-org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
 org.apache.hadoop.hbase.client.TableState.State
-org.apache.hadoop.hbase.client.MasterSwitchType
-org.apache.hadoop.hbase.client.RequestController.ReturnCode
+org.apache.hadoop.hbase.client.Consistency
+org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
 org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
+org.apache.hadoop.hbase.client.IsolationLevel
+org.apache.hadoop.hbase.client.RegionLocateType
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
+org.apache.hadoop.hbase.client.CompactType
 org.apache.hadoop.hbase.client.HBaseAdmin.ReplicationState
 org.apache.hadoop.hbase.client.ScannerCallable.MoreResults
+org.apache.hadoop.hbase.client.MasterSwitchType
+org.apache.hadoop.hbase.client.RequestController.ReturnCode
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
+org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
+org.apache.hadoop.hbase.client.CompactionState
+org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
+org.apache.hadoop.hbase.client.Scan.ReadType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-use.html
index e6f355b..ff8714f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-use.html
@@ -857,135 +857,128 @@ service.
 
 
 
-BufferedMutatorImpl
-
- Used to communicate with a single HBase table similar to Table
- but meant for batched, potentially asynchronous puts.
-
-
-
 BufferedMutatorImpl.QueueRowAccess
 
-
+
 BufferedMutatorParams
 Parameters for instantiating a BufferedMutator.
 
 
-
+
 Cancellable
 This should be implemented by the Get/Scan implementations 
that
  talk to replica regions.
 
 
-
+
 CancellableRegionServerCallable
 This class is used to unify HTable calls with AsyncProcess 
Framework.
 
 
-
+
 ClientScanner
 Implements the scanner interface for the HBase client.
 
 
-
+
 ClientServiceCallable
 A RegionServerCallable set to use the Client protocol.
 
 
-
+
 ClientSideRegionScanner
 A client scanner for a region opened for read-only 

[10/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.html
index 515b2b7..2e3444d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.html
@@ -217,465 +217,436 @@
 209  /**
 210   * Puts some data in the table, in 
batch.
 211   * p
-212   * This can be used for group commit, 
or for submitting user defined
-213   * batches.  The writeBuffer will be 
periodically inspected while the List
-214   * is processed, so depending on the 
List size the writeBuffer may flush
-215   * not at all, or more than once.
-216   * @param puts The list of mutations to 
apply. The batch put is done by
-217   * aggregating the iteration of the 
Puts over the write buffer
-218   * at the client-side for a single RPC 
call.
-219   * @throws IOException if a remote or 
network exception occurs.
-220   * @since 0.20.0
-221   */
-222  void put(ListPut puts) throws 
IOException;
-223
-224  /**
-225   * Atomically checks if a 
row/family/qualifier value matches the expected
-226   * value. If it does, it adds the put.  
If the passed value is null, the check
-227   * is for the lack of column (ie: 
non-existance)
-228   *
-229   * @param row to check
-230   * @param family column family to 
check
-231   * @param qualifier column qualifier to 
check
-232   * @param value the expected value
-233   * @param put data to put if check 
succeeds
-234   * @throws IOException e
-235   * @return true if the new put was 
executed, false otherwise
-236   */
-237  boolean checkAndPut(byte[] row, byte[] 
family, byte[] qualifier,
-238byte[] value, Put put) throws 
IOException;
-239
-240  /**
-241   * Atomically checks if a 
row/family/qualifier value matches the expected
-242   * value. If it does, it adds the put.  
If the passed value is null, the check
-243   * is for the lack of column (ie: 
non-existence)
+212   * This can be used for group commit, 
or for submitting user defined batches.
+213   * @param puts The list of mutations to 
apply.
+214   * @throws IOException if a remote or 
network exception occurs.
+215   * @since 0.20.0
+216   */
+217  void put(ListPut puts) throws 
IOException;
+218
+219  /**
+220   * Atomically checks if a 
row/family/qualifier value matches the expected
+221   * value. If it does, it adds the put.  
If the passed value is null, the check
+222   * is for the lack of column (ie: 
non-existance)
+223   *
+224   * @param row to check
+225   * @param family column family to 
check
+226   * @param qualifier column qualifier to 
check
+227   * @param value the expected value
+228   * @param put data to put if check 
succeeds
+229   * @throws IOException e
+230   * @return true if the new put was 
executed, false otherwise
+231   */
+232  boolean checkAndPut(byte[] row, byte[] 
family, byte[] qualifier,
+233byte[] value, Put put) throws 
IOException;
+234
+235  /**
+236   * Atomically checks if a 
row/family/qualifier value matches the expected
+237   * value. If it does, it adds the put.  
If the passed value is null, the check
+238   * is for the lack of column (ie: 
non-existence)
+239   *
+240   * The expected value argument of this 
call is on the left and the current
+241   * value of the cell is on the right 
side of the comparison operator.
+242   *
+243   * Ie. eg. GREATER operator means 
expected value  existing = add the put.
 244   *
-245   * The expected value argument of this 
call is on the left and the current
-246   * value of the cell is on the right 
side of the comparison operator.
-247   *
-248   * Ie. eg. GREATER operator means 
expected value  existing = add the put.
-249   *
-250   * @param row to check
-251   * @param family column family to 
check
-252   * @param qualifier column qualifier to 
check
-253   * @param compareOp comparison operator 
to use
-254   * @param value the expected value
-255   * @param put data to put if check 
succeeds
-256   * @throws IOException e
-257   * @return true if the new put was 
executed, false otherwise
-258   */
-259  boolean checkAndPut(byte[] row, byte[] 
family, byte[] qualifier,
-260CompareFilter.CompareOp compareOp, 
byte[] value, Put put) throws IOException;
-261
-262  /**
-263   * Deletes the specified cells/row.
-264   *
-265   * @param delete The object that 
specifies what to delete.
-266   * @throws IOException if a remote or 
network exception occurs.
-267   * @since 0.20.0
-268   */
-269  void delete(Delete delete) throws 
IOException;
-270
-271  /**
-272   * Deletes the specified cells/rows in 
bulk.
-273   * @param deletes List of things to 
delete.  List gets modified by this
-274   * method (in particular it gets 
re-ordered, so the order in which the elements
-275   

[15/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/Get.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Get.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Get.html
index 438db17..48420d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Get.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Get.html
@@ -275,297 +275,321 @@
 267  /**
 268   * Get all available versions.
 269   * @return this for invocation 
chaining
-270   */
-271  public Get setMaxVersions() {
-272this.maxVersions = 
Integer.MAX_VALUE;
-273return this;
-274  }
-275
-276  /**
-277   * Get up to the specified number of 
versions of each column.
-278   * @param maxVersions maximum versions 
for each column
-279   * @throws IOException if invalid 
number of versions
-280   * @return this for invocation 
chaining
-281   */
-282  public Get setMaxVersions(int 
maxVersions) throws IOException {
-283if(maxVersions = 0) {
-284  throw new IOException("maxVersions 
must be positive");
-285}
-286this.maxVersions = maxVersions;
-287return this;
-288  }
-289
-290  public Get 
setLoadColumnFamiliesOnDemand(boolean value) {
-291return (Get) 
super.setLoadColumnFamiliesOnDemand(value);
-292  }
-293
-294  /**
-295   * Set the maximum number of values to 
return per row per Column Family
-296   * @param limit the maximum number of 
values returned / row / CF
-297   * @return this for invocation 
chaining
-298   */
-299  public Get 
setMaxResultsPerColumnFamily(int limit) {
-300this.storeLimit = limit;
-301return this;
-302  }
-303
-304  /**
-305   * Set offset for the row per Column 
Family. This offset is only within a particular row/CF
-306   * combination. It gets reset back to 
zero when we move to the next row or CF.
-307   * @param offset is the number of kvs 
that will be skipped.
-308   * @return this for invocation 
chaining
-309   */
-310  public Get 
setRowOffsetPerColumnFamily(int offset) {
-311this.storeOffset = offset;
-312return this;
-313  }
-314
-315  @Override
-316  public Get setFilter(Filter filter) {
-317super.setFilter(filter);
-318return this;
-319  }
-320
-321  /* Accessors */
-322
-323  /**
-324   * Set whether blocks should be cached 
for this Get.
-325   * p
-326   * This is true by default.  When true, 
default settings of the table and
-327   * family are used (this will never 
override caching blocks if the block
-328   * cache is disabled for that family or 
entirely).
-329   *
-330   * @param cacheBlocks if false, default 
settings are overridden and blocks
-331   * will not be cached
-332   */
-333  public Get setCacheBlocks(boolean 
cacheBlocks) {
-334this.cacheBlocks = cacheBlocks;
-335return this;
-336  }
-337
-338  /**
-339   * Get whether blocks should be cached 
for this Get.
-340   * @return true if default caching 
should be used, false if blocks should not
-341   * be cached
-342   */
-343  public boolean getCacheBlocks() {
-344return cacheBlocks;
-345  }
+270   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+271   * {@link 
#readAllVersions()} instead.
+272   */
+273  @Deprecated
+274  public Get setMaxVersions() {
+275return readAllVersions();
+276  }
+277
+278  /**
+279   * Get up to the specified number of 
versions of each column.
+280   * @param maxVersions maximum versions 
for each column
+281   * @throws IOException if invalid 
number of versions
+282   * @return this for invocation 
chaining
+283   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+284   * {@link 
#readVersions(int)} instead.
+285   */
+286  @Deprecated
+287  public Get setMaxVersions(int 
maxVersions) throws IOException {
+288return readVersions(maxVersions);
+289  }
+290
+291  /**
+292   * Get all available versions.
+293   * @return this for invocation 
chaining
+294   */
+295  public Get readAllVersions() {
+296this.maxVersions = 
Integer.MAX_VALUE;
+297return this;
+298  }
+299
+300  /**
+301   * Get up to the specified number of 
versions of each column.
+302   * @param versions specified number of 
versions for each column
+303   * @throws IOException if invalid 
number of versions
+304   * @return this for invocation 
chaining
+305   */
+306  public Get readVersions(int versions) 
throws IOException {
+307if (versions = 0) {
+308  throw new IOException("versions 
must be positive");
+309}
+310this.maxVersions = versions;
+311return this;
+312  }
+313
+314  public Get 
setLoadColumnFamiliesOnDemand(boolean value) {
+315return (Get) 
super.setLoadColumnFamiliesOnDemand(value);
+316  }
+317
+318  /**
+319   * Set the maximum number of values to 
return per row per Column Family
+320   * @param limit the maximum number of 
values returned / row / 

[13/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/HTableWrapper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTableWrapper.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTableWrapper.html
index d0faa65..965105d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTableWrapper.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTableWrapper.html
@@ -279,71 +279,61 @@
 271  }
 272
 273  @Override
-274  public long getWriteBufferSize() {
-275 return table.getWriteBufferSize();
-276  }
-277
-278  @Override
-279  public void setWriteBufferSize(long 
writeBufferSize) throws IOException {
-280
table.setWriteBufferSize(writeBufferSize);
-281  }
-282
-283  @Override
-284  public R extends Message 
Mapbyte[], R batchCoprocessorService(
-285  MethodDescriptor methodDescriptor, 
Message request, byte[] startKey, byte[] endKey,
-286  R responsePrototype) throws 
ServiceException, Throwable {
-287return 
table.batchCoprocessorService(methodDescriptor, request, startKey, endKey,
-288  responsePrototype);
-289  }
-290
-291  @Override
-292  public R extends Message void 
batchCoprocessorService(MethodDescriptor methodDescriptor,
-293  Message request, byte[] startKey, 
byte[] endKey, R responsePrototype, CallbackR callback)
-294  throws ServiceException, Throwable 
{
-295
table.batchCoprocessorService(methodDescriptor, request, startKey, endKey, 
responsePrototype,
-296  callback);
-297  }
-298
-299  @Override
-300  public boolean checkAndMutate(byte[] 
row, byte[] family, byte[] qualifier,
-301  CompareOp compareOp, byte[] value, 
RowMutations rm) throws IOException {
-302return table.checkAndMutate(row, 
family, qualifier, compareOp, value, rm);
+274  public R extends Message 
Mapbyte[], R batchCoprocessorService(
+275  MethodDescriptor methodDescriptor, 
Message request, byte[] startKey, byte[] endKey,
+276  R responsePrototype) throws 
ServiceException, Throwable {
+277return 
table.batchCoprocessorService(methodDescriptor, request, startKey, endKey,
+278  responsePrototype);
+279  }
+280
+281  @Override
+282  public R extends Message void 
batchCoprocessorService(MethodDescriptor methodDescriptor,
+283  Message request, byte[] startKey, 
byte[] endKey, R responsePrototype, CallbackR callback)
+284  throws ServiceException, Throwable 
{
+285
table.batchCoprocessorService(methodDescriptor, request, startKey, endKey, 
responsePrototype,
+286  callback);
+287  }
+288
+289  @Override
+290  public boolean checkAndMutate(byte[] 
row, byte[] family, byte[] qualifier,
+291  CompareOp compareOp, byte[] value, 
RowMutations rm) throws IOException {
+292return table.checkAndMutate(row, 
family, qualifier, compareOp, value, rm);
+293  }
+294
+295  @Override
+296  public void setOperationTimeout(int 
operationTimeout) {
+297
table.setOperationTimeout(operationTimeout);
+298  }
+299
+300  @Override
+301  public int getOperationTimeout() {
+302return table.getOperationTimeout();
 303  }
 304
 305  @Override
-306  public void setOperationTimeout(int 
operationTimeout) {
-307
table.setOperationTimeout(operationTimeout);
-308  }
-309
-310  @Override
-311  public int getOperationTimeout() {
-312return table.getOperationTimeout();
-313  }
-314
-315  @Override
-316  @Deprecated
-317  public void setRpcTimeout(int 
rpcTimeout) {
-318table.setRpcTimeout(rpcTimeout);
-319  }
-320
-321  @Override
-322  public void setWriteRpcTimeout(int 
writeRpcTimeout) { table.setWriteRpcTimeout(writeRpcTimeout); }
-323
-324  @Override
-325  public void setReadRpcTimeout(int 
readRpcTimeout) { table.setReadRpcTimeout(readRpcTimeout); }
-326
-327  @Override
-328  @Deprecated
-329  public int getRpcTimeout() {
-330return table.getRpcTimeout();
-331  }
-332
-333  @Override
-334  public int getWriteRpcTimeout() { 
return table.getWriteRpcTimeout(); }
-335
-336  @Override
-337  public int getReadRpcTimeout() { return 
table.getReadRpcTimeout(); }
-338}
+306  @Deprecated
+307  public void setRpcTimeout(int 
rpcTimeout) {
+308table.setRpcTimeout(rpcTimeout);
+309  }
+310
+311  @Override
+312  public void setWriteRpcTimeout(int 
writeRpcTimeout) { table.setWriteRpcTimeout(writeRpcTimeout); }
+313
+314  @Override
+315  public void setReadRpcTimeout(int 
readRpcTimeout) { table.setReadRpcTimeout(readRpcTimeout); }
+316
+317  @Override
+318  @Deprecated
+319  public int getRpcTimeout() {
+320return table.getRpcTimeout();
+321  }
+322
+323  @Override
+324  public int getWriteRpcTimeout() { 
return table.getWriteRpcTimeout(); }
+325
+326  @Override
+327  public int getReadRpcTimeout() { return 
table.getReadRpcTimeout(); }
+328}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/Query.html

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index d3e74d7..8e77a9e 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -5638,6 +5638,8 @@
 
 beforeShipped()
 - Method in class org.apache.hadoop.hbase.regionserver.querymatcher.ScanWildcardColumnTracker
 
+beforeShipped()
 - Method in class org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher
+
 beforeShipped()
 - Method in interface org.apache.hadoop.hbase.regionserver.ShipperListener
 
 The action that needs to be performed before Shipper.shipped()
 is performed
@@ -17530,6 +17532,8 @@
 
 count
 - Variable in class org.apache.hadoop.hbase.regionserver.querymatcher.ColumnCount
 
+count
 - Variable in class org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher
+
 count
 - Variable in class org.apache.hadoop.metrics2.util.MetricSampleQuantiles
 
 Total number of items in stream
@@ -20204,6 +20208,8 @@
 
 curChunk
 - Variable in class org.apache.hadoop.hbase.regionserver.MemStoreLABImpl
 
+curColCell
 - Variable in class org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher
+
 curFamily
 - Variable in class org.apache.hadoop.hbase.security.visibility.VisibilityLabelFilter
 
 curFamilyMaxVersions
 - Variable in class org.apache.hadoop.hbase.security.visibility.VisibilityLabelFilter
@@ -30635,8 +30641,6 @@
 
 flushCheckInterval
 - Variable in class org.apache.hadoop.hbase.regionserver.HRegion
 
-flushCommits()
 - Method in class org.apache.hadoop.hbase.client.HTable
-
 flushCommits()
 - Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
 
 flushConfig()
 - Method in class org.apache.hadoop.hbase.rsgroup.RSGroupInfoManagerImpl
@@ -33571,8 +33575,6 @@
 
 getBufferedMutator(TableName)
 - Method in class org.apache.hadoop.hbase.client.ConnectionImplementation
 
-getBufferedMutator()
 - Method in class org.apache.hadoop.hbase.client.HTable
-
 getBufferedMutator(ImmutableBytesWritable)
 - Method in class org.apache.hadoop.hbase.mapreduce.MultiTableOutputFormat.MultiTableRecordWriter
 
 getBufferOffset()
 - Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
@@ -49144,6 +49146,10 @@
 
 getTotalRITsOverThreshold()
 - Method in class org.apache.hadoop.hbase.master.assignment.AssignmentManager.RegionInTransitionStat
 
+getTotalRowActionRequestCount()
 - Method in interface org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper
+
+getTotalRowActionRequestCount()
 - Method in class org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperImpl
+
 getTotalSize()
 - Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator
 
 getTotalSizeOfCells(Result)
 - Static method in class org.apache.hadoop.hbase.client.Result
@@ -50326,20 +50332,6 @@
 
 getWriteBufferSize()
 - Method in class org.apache.hadoop.hbase.client.ConnectionConfiguration
 
-getWriteBufferSize()
 - Method in class org.apache.hadoop.hbase.client.HTable
-
-Returns the maximum size in bytes of the write buffer for 
this HTable.
-
-getWriteBufferSize()
 - Method in class org.apache.hadoop.hbase.client.HTableWrapper
-
-getWriteBufferSize()
 - Method in interface org.apache.hadoop.hbase.client.Table
-
-Deprecated.
-as of 1.0.1 (should not 
have been in 1.0.0). Replaced by BufferedMutator.getWriteBufferSize()
-
-
-getWriteBufferSize()
 - Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
 getWriteEntry()
 - Method in class org.apache.hadoop.hbase.wal.WALKey
 
 Use it to complete mvcc transaction.
@@ -54237,10 +54229,6 @@
 
 Creates an object to access a HBase table.
 
-HTable(ClusterConnection,
 BufferedMutatorImpl) - Constructor for class 
org.apache.hadoop.hbase.client.HTable
-
-For internal testing.
-
 htable
 - Variable in class org.apache.hadoop.hbase.mapred.TableRecordReaderImpl
 
 htable
 - Variable in class org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl
@@ -57998,6 +57986,8 @@
 
 Checks whether the block cache is enabled.
 
+isBoolean(String)
 - Static method in class org.apache.hadoop.hbase.thrift.DemoClient
+
 isBootstrapNamespace()
 - Method in class org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure
 
 isBranch()
 - Method in class org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArrayScanner
@@ -68462,6 +68452,8 @@
 
 mergeFamilyMaps(Mapbyte[],
 ListCell, Mapbyte[], ListCell) - Method 
in class org.apache.hadoop.hbase.regionserver.HRegion
 
+mergeFilterResponse(Cell,
 ScanQueryMatcher.MatchCode, Filter.ReturnCode) - Method in class 
org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher
+
 mergeLocations(RegionLocations)
 - Method in class org.apache.hadoop.hbase.RegionLocations
 
 Merges this RegionLocations list with the given list 
assuming
@@ -71594,12 +71586,8 @@
 
 MutationType()
 - Constructor for enum 

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html 
b/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
index f13140b..1f135b1 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":10,"i17":10,"i18":42,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":42,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":42,"i44":10,"i45":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":10,"i17":10,"i18":42,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":42,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":42,"i43":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -452,24 +452,18 @@ implements 
 
 
-long
-getWriteBufferSize()
-Returns the maximum size in bytes of the write buffer for 
this HTable.
-
-
-
 int
 getWriteRpcTimeout()
 Get timeout (millisecond) of each rpc write request in this 
Table instance.
 
 
-
+
 Result
 increment(Incrementincrement)
 Increments one or more columns within a single row.
 
 
-
+
 long
 incrementColumnValue(byte[]row,
 byte[]family,
@@ -478,7 +472,7 @@ implements See Table.incrementColumnValue(byte[],
 byte[], byte[], long, Durability)
 
 
-
+
 long
 incrementColumnValue(byte[]row,
 byte[]family,
@@ -488,55 +482,49 @@ implements Atomically increments a column value.
 
 
-
+
 void
 internalClose()
 
-
+
 void
 mutateRow(RowMutationsrm)
 Performs multiple mutations atomically on a single 
row.
 
 
-
+
 void
 put(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPutputs)
 Puts some data in the table, in batch.
 
 
-
+
 void
 put(Putput)
 Puts some data in the table.
 
 
-
+
 void
 setOperationTimeout(intoperationTimeout)
 Set timeout (millisecond) of each operation in this Table 
instance, will override the value
  of hbase.client.operation.timeout in configuration.
 
 
-
+
 void
 setReadRpcTimeout(intreadRpcTimeout)
 Set timeout (millisecond) of each rpc read request in 
operations of this Table instance, will
  override the value of hbase.rpc.read.timeout in configuration.
 
 
-
+
 void
 setRpcTimeout(intrpcTimeout)
 Deprecated.
 
 
-
-void
-setWriteBufferSize(longwriteBufferSize)
-Sets the size of the buffer in bytes.
-
-
-
+
 void
 setWriteRpcTimeout(intwriteRpcTimeout)
 Set timeout (millisecond) of each rpc write request in 
operations of this Table instance, will
@@ -828,17 +816,12 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.
 Description copied from 
interface:Table
 Puts some data in the table, in batch.
  
- This can be used for group commit, or for submitting user defined
- batches.  The writeBuffer will be periodically inspected while the List
- is processed, so depending on the List size the writeBuffer may flush
- not at all, or more than once.
+ This can be used for group commit, or for submitting user defined 
batches.
 
 Specified by:
 putin
 interfaceTable
 Parameters:
-puts - The list of mutations to apply. The batch put is done 
by
- aggregating the iteration of the Puts over the write buffer
- at the client-side for a single RPC call.
+puts - The list of mutations to apply.
 Throws:
 http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException - if a remote or 
network exception occurs.
 
@@ -1496,49 +1479,6 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.
 
 
 
-
-
-
-
-
-getWriteBufferSize
-publiclonggetWriteBufferSize()
-Description copied from 
interface:Table
-Returns the maximum size in bytes of the write buffer for 
this HTable.
- 
- The default value comes from the configuration parameter
- hbase.client.write.buffer.
-
-Specified by:
-getWriteBufferSizein
 interfaceTable
-Returns:
-The size of the write buffer in bytes.
-
-
-
-
-
-
-
-
-setWriteBufferSize
-publicvoidsetWriteBufferSize(longwriteBufferSize)
-throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 

[35/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/HTable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HTable.html 
b/devapidocs/org/apache/hadoop/hbase/client/HTable.html
index 1ac915d..c7dff41 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HTable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HTable.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":9,"i29":10,"i30":10,"i31":10,"i32":9,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":42,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":42,"i54":42,"i55":42,"i56":42,"i57":42,"i58":10,"i59":10,"i60":9};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":9,"i27":10,"i28":10,"i29":10,"i30":9,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":42,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":42,"i51":42,"i52":42,"i53":42,"i54":10,"i55":10,"i56":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -198,14 +198,6 @@ implements 
 
 
-(package private) BufferedMutatorImpl
-mutator
-
-
-private http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-mutatorLock
-
-
 private int
 operationTimeout
 
@@ -242,10 +234,6 @@ implements tableName
 
 
-private long
-writeBufferSize
-
-
 private int
 writeRpcTimeout
 
@@ -266,13 +254,6 @@ implements 
 
 protected 
-HTable(ClusterConnectionconn,
-  BufferedMutatorImplmutator)
-For internal testing.
-
-
-
-protected 
 HTable(ClusterConnectionconnection,
   TableBuilderBasebuilder,
   RpcRetryingCallerFactoryrpcCallerFactory,
@@ -483,54 +464,46 @@ implements 
 
 
-(package private) void
-flushCommits()
-
-
 Result
 get(Getget)
 Extracts certain cells from a given row.
 
 
-
+
 private Result
 get(Getget,
booleancheckExistenceOnly)
 
-
+
 Result[]
 get(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListGetgets)
 Extracts certain cells from the given rows, in batch.
 
 
-
-(package private) BufferedMutator
-getBufferedMutator()
-
-
+
 org.apache.hadoop.conf.Configuration
 getConfiguration()
 Returns the Configuration object used by this 
instance.
 
 
-
+
 protected Connection
 getConnection()
 INTERNAL Used by unit tests and tools to do 
low-level
  manipulations.
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ThreadPoolExecutor.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ThreadPoolExecutor
 getDefaultExecutor(org.apache.hadoop.conf.Configurationconf)
 
-
+
 TableDescriptor
 getDescriptor()
 Gets the table 
descriptor for this table.
 
 
-
+
 private Pairhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionLocation
 getKeysAndRegionsInRange(byte[]startKey,
 byte[]endKey,
@@ -539,7 +512,7 @@ implements 
 
 
-
+
 private Pairhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionLocation
 getKeysAndRegionsInRange(byte[]startKey,
 byte[]endKey,
@@ -549,93 +522,87 @@ implements 
 
 
-
+
 static int
 getMaxKeyValueSize(org.apache.hadoop.conf.Configurationconf)
 
-
+
 TableName
 getName()
 Gets the fully qualified table name instance of this 
table.
 
 
-
+
 int
 getOperationTimeout()
 Get timeout (millisecond) of each operation for in Table 
instance.
 
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ExecutorService
 getPool()
 The pool is used for mutli requests for this HTable
 
 
-
+
 int
 getReadRpcTimeout()
 Get timeout (millisecond) of each rpc read request in this 
Table instance.
 
 
-
+
 

[12/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
index 8f0943d..63ba3f6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
@@ -601,634 +601,657 @@
 593  /**
 594   * Get all available versions.
 595   * @return this
-596   */
-597  public Scan setMaxVersions() {
-598this.maxVersions = 
Integer.MAX_VALUE;
-599return this;
-600  }
-601
-602  /**
-603   * Get up to the specified number of 
versions of each column.
-604   * @param maxVersions maximum versions 
for each column
-605   * @return this
-606   */
-607  public Scan setMaxVersions(int 
maxVersions) {
-608this.maxVersions = maxVersions;
-609return this;
-610  }
-611
-612  /**
-613   * Set the maximum number of cells to 
return for each call to next(). Callers should be aware
-614   * that this is not equivalent to 
calling {@link #setAllowPartialResults(boolean)}.
-615   * If you don't allow partial results, 
the number of cells in each Result must equal to your
-616   * batch setting unless it is the last 
Result for current row. So this method is helpful in paging
-617   * queries. If you just want to prevent 
OOM at client, use setAllowPartialResults(true) is better.
-618   * @param batch the maximum number of 
values
-619   * @see 
Result#mayHaveMoreCellsInRow()
-620   */
-621  public Scan setBatch(int batch) {
-622if (this.hasFilter()  
this.filter.hasFilterRow()) {
-623  throw new 
IncompatibleFilterException(
-624"Cannot set batch on a scan using 
a filter" +
-625" that returns true for 
filter.hasFilterRow");
-626}
-627this.batch = batch;
-628return this;
-629  }
-630
-631  /**
-632   * Set the maximum number of values to 
return per row per Column Family
-633   * @param limit the maximum number of 
values returned / row / CF
-634   */
-635  public Scan 
setMaxResultsPerColumnFamily(int limit) {
-636this.storeLimit = limit;
-637return this;
-638  }
-639
-640  /**
-641   * Set offset for the row per Column 
Family.
-642   * @param offset is the number of kvs 
that will be skipped.
+596   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+597   * {@link 
#readAllVersions()} instead.
+598   */
+599  @Deprecated
+600  public Scan setMaxVersions() {
+601return readAllVersions();
+602  }
+603
+604  /**
+605   * Get up to the specified number of 
versions of each column.
+606   * @param maxVersions maximum versions 
for each column
+607   * @return this
+608   * @deprecated It is easy to 
misunderstand with column family's max versions, so use
+609   * {@link 
#readVersions(int)} instead.
+610   */
+611  @Deprecated
+612  public Scan setMaxVersions(int 
maxVersions) {
+613return readVersions(maxVersions);
+614  }
+615
+616  /**
+617   * Get all available versions.
+618   * @return this
+619   */
+620  public Scan readAllVersions() {
+621this.maxVersions = 
Integer.MAX_VALUE;
+622return this;
+623  }
+624
+625  /**
+626   * Get up to the specified number of 
versions of each column.
+627   * @param versions specified number of 
versions for each column
+628   * @return this
+629   */
+630  public Scan readVersions(int versions) 
{
+631this.maxVersions = versions;
+632return this;
+633  }
+634
+635  /**
+636   * Set the maximum number of cells to 
return for each call to next(). Callers should be aware
+637   * that this is not equivalent to 
calling {@link #setAllowPartialResults(boolean)}.
+638   * If you don't allow partial results, 
the number of cells in each Result must equal to your
+639   * batch setting unless it is the last 
Result for current row. So this method is helpful in paging
+640   * queries. If you just want to prevent 
OOM at client, use setAllowPartialResults(true) is better.
+641   * @param batch the maximum number of 
values
+642   * @see 
Result#mayHaveMoreCellsInRow()
 643   */
-644  public Scan 
setRowOffsetPerColumnFamily(int offset) {
-645this.storeOffset = offset;
-646return this;
-647  }
-648
-649  /**
-650   * Set the number of rows for caching 
that will be passed to scanners.
-651   * If not set, the Configuration 
setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
-652   * apply.
-653   * Higher caching values will enable 
faster scanners but will use more memory.
-654   * @param caching the number of rows 
for caching
-655   */
-656  public Scan setCaching(int caching) {
-657this.caching = caching;
-658return this;
-659  }
-660
-661  /**
-662   * @return the maximum result size in 
bytes. See {@link #setMaxResultSize(long)}
-663   */
-664 

[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/thrift/DemoClient.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift/DemoClient.html 
b/devapidocs/org/apache/hadoop/hbase/thrift/DemoClient.html
index a81ad06..472ba0e 100644
--- a/devapidocs/org/apache/hadoop/hbase/thrift/DemoClient.html
+++ b/devapidocs/org/apache/hadoop/hbase/thrift/DemoClient.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":9,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
+var methods = 
{"i0":10,"i1":9,"i2":9,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -146,6 +146,10 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 private static boolean
 secure
 
+
+private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+serverPrincipal
+
 
 
 
@@ -187,27 +191,31 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 getSubject()
 
 
+private static boolean
+isBoolean(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Strings)
+
+
 static void
 main(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">String[]args)
 
-
+
 private void
 printRow(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.thrift.generated.TRowResultrows)
 
-
+
 private void
 printRow(org.apache.hadoop.hbase.thrift.generated.TRowResultrowResult)
 
-
+
 private void
 printVersions(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferrow,
  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.thrift.generated.TCellversions)
 
-
+
 private void
 run()
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 utf8(byte[]buf)
 
@@ -263,12 +271,21 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 secure
 private staticboolean secure
 
 
+
+
+
+
+
+serverPrincipal
+private statichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String serverPrincipal
+
+
 
 
 
@@ -283,7 +300,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DemoClient
-DemoClient()
+DemoClient()
 
 
 
@@ -300,7 +317,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 main
-public staticvoidmain(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]args)
+public staticvoidmain(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]args)
  throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -308,13 +325,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+
+
+
+
+
+isBoolean
+private staticbooleanisBoolean(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Strings)
+
+
 
 
 
 
 
 utf8
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringutf8(byte[]buf)
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringutf8(byte[]buf)
 
 
 
@@ -323,7 +349,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 bytes
-privatebyte[]bytes(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Strings)
+privatebyte[]bytes(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Strings)
 
 
 
@@ -332,7 +358,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 run
-privatevoidrun()
+privatevoidrun()
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Throws:
@@ -346,7 +372,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 printVersions

[04/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index a701ece..26ee60a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -896,7376 +896,7383 @@
 888}
 889
 890// Write HRI to a file in case we 
need to recover hbase:meta
-891status.setStatus("Writing region info 
on filesystem");
-892fs.checkRegionInfoOnFilesystem();
-893
-894// Initialize all the HStores
-895status.setStatus("Initializing all 
the Stores");
-896long maxSeqId = 
initializeStores(reporter, status);
-897this.mvcc.advanceTo(maxSeqId);
-898if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-899  ListStore stores = 
this.getStores();  // update the stores that we are replaying
-900  try {
-901for (Store store : stores) {
-902  ((HStore) 
store).startReplayingFromWAL();
-903}
-904// Recover any edits if 
available.
-905maxSeqId = Math.max(maxSeqId,
-906
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-907// Make sure mvcc is up to max.
-908this.mvcc.advanceTo(maxSeqId);
-909  } finally {
-910for (Store store : stores) {  
  // update the stores that we are done replaying
-911  
((HStore)store).stopReplayingFromWAL();
-912}
-913  }
-914
-915}
-916this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-917
-918
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-919this.writestate.flushRequested = 
false;
-920this.writestate.compacting.set(0);
+891// Only the primary replica should 
write .regioninfo
+892if 
(this.getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+893  status.setStatus("Writing region 
info on filesystem");
+894  fs.checkRegionInfoOnFilesystem();
+895} else {
+896  if (LOG.isDebugEnabled()) {
+897LOG.debug("Skipping creation of 
.regioninfo file for " + this.getRegionInfo());
+898  }
+899}
+900
+901// Initialize all the HStores
+902status.setStatus("Initializing all 
the Stores");
+903long maxSeqId = 
initializeStores(reporter, status);
+904this.mvcc.advanceTo(maxSeqId);
+905if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+906  ListStore stores = 
this.getStores();  // update the stores that we are replaying
+907  try {
+908for (Store store : stores) {
+909  ((HStore) 
store).startReplayingFromWAL();
+910}
+911// Recover any edits if 
available.
+912maxSeqId = Math.max(maxSeqId,
+913
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+914// Make sure mvcc is up to max.
+915this.mvcc.advanceTo(maxSeqId);
+916  } finally {
+917for (Store store : stores) {  
  // update the stores that we are done replaying
+918  
((HStore)store).stopReplayingFromWAL();
+919}
+920  }
 921
-922if (this.writestate.writesEnabled) 
{
-923  // Remove temporary data left over 
from old regions
-924  status.setStatus("Cleaning up 
temporary data from old regions");
-925  fs.cleanupTempDir();
-926}
-927
-928if (this.writestate.writesEnabled) 
{
-929  status.setStatus("Cleaning up 
detritus from prior splits");
-930  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-931  // these directories here on open.  
We may be opening a region that was
-932  // being split but we crashed in 
the middle of it all.
-933  fs.cleanupAnySplitDetritus();
-934  fs.cleanupMergesDir();
-935}
-936
-937// Initialize split policy
-938this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-939
-940// Initialize flush policy
-941this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-942
-943long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-944for (Store store: stores.values()) 
{
-945  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-946}
-947
-948// Use maximum of log sequenceid or 
that which was found in stores
-949// (particularly if no recovered 
edits, seqid will be -1).
-950long nextSeqid = maxSeqId;
-951
-952// In distributedLogReplay mode, we 
don't know the last change sequence number because region
-953// is opened before recovery 
completes. So we add a safety bumper to avoid new sequence number
-954// overlaps used sequence numbers
-955

[40/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 0337202..02b9356 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -289,7 +289,7 @@
 2024
 0
 0
-12787
+12799
 
 Files
 
@@ -982,7 +982,7 @@
 org/apache/hadoop/hbase/client/HTable.java
 0
 0
-28
+31
 
 org/apache/hadoop/hbase/client/HTableMultiplexer.java
 0
@@ -992,7 +992,7 @@
 org/apache/hadoop/hbase/client/HTableWrapper.java
 0
 0
-8
+7
 
 org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
 0
@@ -1252,7 +1252,7 @@
 org/apache/hadoop/hbase/client/Table.java
 0
 0
-18
+14
 
 org/apache/hadoop/hbase/client/TableDescriptor.java
 0
@@ -3202,7 +3202,7 @@
 org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 0
 0
-3
+5
 
 org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
 0
@@ -3212,7 +3212,7 @@
 org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
 0
 0
-8
+9
 
 org/apache/hadoop/hbase/master/assignment/Util.java
 0
@@ -4192,7 +4192,7 @@
 org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
 0
 0
-48
+49
 
 org/apache/hadoop/hbase/regionserver/HRegionServer.java
 0
@@ -4337,7 +4337,7 @@
 org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 0
 0
-3
+4
 
 org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
 0
@@ -5902,7 +5902,7 @@
 org/apache/hadoop/hbase/thrift/DemoClient.java
 0
 0
-241
+250
 
 org/apache/hadoop/hbase/thrift/HThreadedSelectorServerArgs.java
 0
@@ -6909,7 +6909,7 @@
 
 annotation
 http://checkstyle.sourceforge.net/config_annotation.html#MissingDeprecated;>MissingDeprecated
-103
+102
 Error
 
 blocks
@@ -6924,7 +6924,7 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces;>NeedBraces
-1733
+1734
 Error
 
 coding
@@ -6998,7 +6998,7 @@
 http://checkstyle.sourceforge.net/config_imports.html#UnusedImports;>UnusedImports
 
 processJavadoc: true
-110
+112
 Error
 
 indentation
@@ -7009,19 +7009,19 @@
 caseIndent: 2
 basicOffset: 2
 lineWrappingIndentation: 2
-3843
+3855
 Error
 
 javadoc
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation;>JavadocTagContinuationIndentation
 
 offset: 2
-765
+760
 Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription
-3229
+3232
 Error
 
 misc
@@ -12913,7 +12913,7 @@
 
 Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 43 has parse error. Missed HTML close tag 
'TableName'. Sometimes it means that close tag missed for one of previous 
tags.
 181
 
@@ -16356,19 +16356,19 @@
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-331
+355
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-341
+365
 
 Error
 indentation
 Indentation
 'method def' child have incorrect indentation level 6, expected level 
should be 4.
-553
+577
 
 org/apache/hadoop/hbase/client/HBaseAdmin.java
 
@@ -16765,7 +16765,7 @@
 
 Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 64 has parse error. Missed HTML close tag 
'code'. Sometimes it means that close tag missed for one of previous tags.
 2112
 
@@ -17034,206 +17034,224 @@
 26
 
 Error
-javadoc
-NonEmptyAtclauseDescription
-At-clause should have a non-empty description.
-208
-
-Error
 sizes
 LineLength
 Line is longer than 100 characters (found 108).
-417
-
+387
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-602
+572
+
+Error
+indentation
+Indentation
+'method def modifier' have incorrect indentation level 10, expected level 
should be one of the following: 6, 8.
+580
+
+Error
+indentation
+Indentation
+'method def' child have incorrect indentation level 12, expected level 
should be one of the following: 8, 10.
+582
+
+Error
+indentation
+Indentation
+'method def' child have incorrect indentation level 12, expected level 
should be one of the following: 8, 10.
+585
+
+Error
+indentation
+Indentation
+'method def' child have incorrect indentation level 12, expected level 
should be one of the following: 8, 10.
+586
+
+Error
+indentation
+Indentation
+'method def rcurly' have incorrect indentation level 10, expected level 
should be one of the following: 6, 8.
+587
 
 Error
+indentation
+Indentation
+'object def rcurly' have incorrect indentation level 8, expected level 
should be one of the following: 4, 6.
+588
+
+Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-612
-
+595
+
 Error
 sizes
 LineLength
 

[46/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/org/apache/hadoop/hbase/client/Table.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Table.html 
b/apidocs/org/apache/hadoop/hbase/client/Table.html
index f35e800..5e317d4 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Table.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Table.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":38,"i26":6,"i27":6,"i28":6,"i29":38,"i30":38,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":38,"i39":38,"i40":38,"i41":38,"i42":38};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":38,"i26":6,"i27":6,"i28":6,"i29":38,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":38,"i38":38,"i39":38,"i40":38};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -388,26 +388,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-long
-getWriteBufferSize()
-Deprecated.
-as of 1.0.1 (should not 
have been in 1.0.0). Replaced by BufferedMutator.getWriteBufferSize()
-
-
-
-
 int
 getWriteRpcTimeout()
 Get timeout (millisecond) of each rpc write request in this 
Table instance.
 
 
-
+
 Result
 increment(Incrementincrement)
 Increments one or more columns within a single row.
 
 
-
+
 long
 incrementColumnValue(byte[]row,
 byte[]family,
@@ -416,7 +408,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 See incrementColumnValue(byte[],
 byte[], byte[], long, Durability)
 
 
-
+
 long
 incrementColumnValue(byte[]row,
 byte[]family,
@@ -426,25 +418,25 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 Atomically increments a column value.
 
 
-
+
 void
 mutateRow(RowMutationsrm)
 Performs multiple mutations atomically on a single 
row.
 
 
-
+
 void
 put(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPutputs)
 Puts some data in the table, in batch.
 
 
-
+
 void
 put(Putput)
 Puts some data in the table.
 
 
-
+
 void
 setOperationTimeout(intoperationTimeout)
 Deprecated.
@@ -452,7 +444,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-
+
 void
 setReadRpcTimeout(intreadRpcTimeout)
 Deprecated.
@@ -460,7 +452,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-
+
 void
 setRpcTimeout(intrpcTimeout)
 Deprecated.
@@ -468,16 +460,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-
-void
-setWriteBufferSize(longwriteBufferSize)
-Deprecated.
-as of 1.0.1 (should not 
have been in 1.0.0). Replaced by BufferedMutator and
- BufferedMutatorParams.writeBufferSize(long)
-
-
-
-
+
 void
 setWriteRpcTimeout(intwriteRpcTimeout)
 Deprecated.
@@ -800,19 +783,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 put
-voidput(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPutputs)
+voidput(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPutputs)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Puts some data in the table, in batch.
  
- This can be used for group commit, or for submitting user defined
- batches.  The writeBuffer will be periodically inspected while the List
- is processed, so depending on the List size the writeBuffer may flush
- not at all, or more than once.
+ This can be used for group commit, or for submitting user defined 
batches.
 
 Parameters:
-puts - The list of mutations to apply. The batch put is done 
by
- aggregating the iteration of the Puts over the write buffer
- at the client-side for a single RPC call.
+puts - The list of mutations to apply.
 Throws:
 http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException - if a remote or 
network exception occurs.
 Since:
@@ -826,7 +804,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 checkAndPut
-booleancheckAndPut(byte[]row,
+booleancheckAndPut(byte[]row,
 byte[]family,
 byte[]qualifier,
 byte[]value,
@@ -855,7 +833,7 @@ 

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
index 37a2235..386a41b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static interface RSRpcServices.LogDelegate
+static interface RSRpcServices.LogDelegate
 
 
 
@@ -150,7 +150,7 @@ var activeTableTab = "activeTableTab";
 
 
 logBatchWarning
-voidlogBatchWarning(intsum,
+voidlogBatchWarning(intsum,
  introwSizeWarnThreshold)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
index b6c32b6..8234a37 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static final class RSRpcServices.RegionScannerCloseCallBack
+private static final class RSRpcServices.RegionScannerCloseCallBack
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements RpcCallback
 An Rpc callback for closing a RegionScanner.
@@ -209,7 +209,7 @@ implements 
 
 scanner
-private finalRegionScanner scanner
+private finalRegionScanner scanner
 
 
 
@@ -226,7 +226,7 @@ implements 
 
 RegionScannerCloseCallBack
-publicRegionScannerCloseCallBack(RegionScannerscanner)
+publicRegionScannerCloseCallBack(RegionScannerscanner)
 
 
 
@@ -243,7 +243,7 @@ implements 
 
 run
-publicvoidrun()
+publicvoidrun()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:RpcCallback
 Called at the end of an Rpc Call RpcCallContext

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
index dc81296..a084b74 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static final class RSRpcServices.RegionScannerHolder
+private static final class RSRpcServices.RegionScannerHolder
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Holder class which holds the RegionScanner, nextCallSeq and 
RpcCallbacks together.
 
@@ -239,7 +239,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 nextCallSeq
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">AtomicLong nextCallSeq
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">AtomicLong nextCallSeq
 
 
 
@@ -248,7 +248,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 scannerName
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String scannerName
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String scannerName
 
 
 
@@ -257,7 +257,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 s
-private finalRegionScanner s
+private finalRegionScanner s
 
 
 
@@ -266,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 r
-private finalRegion r
+private finalRegion r
 
 
 
@@ -275,7 +275,7 @@ extends 

[47/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Scan.html 
b/apidocs/org/apache/hadoop/hbase/client/Scan.html
index 22287de..56dcad0 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Scan.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Scan.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":42,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":42,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":42,"i65":42,"i66":42,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10};
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":42,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":42,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":42,"i55":42,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":42,"i67":42,"i68":42,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -477,56 +477,68 @@ extends 
 
 Scan
-setACL(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">String,org.apache.hadoop.hbase.security.access.Permissionperms)
+readAllVersions()
+Get all available versions.
+
 
 
 Scan
+readVersions(intversions)
+Get up to the specified number of versions of each 
column.
+
+
+
+Scan
+setACL(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">String,org.apache.hadoop.hbase.security.access.Permissionperms)
+
+
+Scan
 setACL(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringuser,
   
org.apache.hadoop.hbase.security.access.Permissionperms)
 
-
+
 Scan
 setAllowPartialResults(booleanallowPartialResults)
 Setting whether the caller wants to see the partial results 
when server returns
  less-than-expected cells.
 
 
-
+
 Scan
 setAsyncPrefetch(booleanasyncPrefetch)
 
-
+
 Scan
 setAttribute(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname,
 byte[]value)
 Sets an attribute.
 
 
-
+
 Scan
 setAuthorizations(org.apache.hadoop.hbase.security.visibility.Authorizationsauthorizations)
 Sets the authorizations to be used by this Query
 
 
-
+
 Scan
 setBatch(intbatch)
 Set the maximum number of cells to return for each call to 
next().
 
 
-
+
 Scan
 setCacheBlocks(booleancacheBlocks)
 Set whether blocks should be cached for this Scan.
 
 
-
+
 Scan
 setCaching(intcaching)
 Set the number of rows for caching that will be passed to 
scanners.
 
 
-
+
 Scan
 setColumnFamilyTimeRange(byte[]cf,
 longminStamp,
@@ -535,139 +547,145 @@ extends 
 
 
-
+
 Scan
 setColumnFamilyTimeRange(byte[]cf,
 TimeRangetr)
 
-
+
 Scan
 setConsistency(Consistencyconsistency)
 Sets the consistency level for this operation
 
 
-
+
 Scan
 setFamilyMap(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]familyMap)
 Setting the familyMap
 
 
-
+
 Scan
 setFilter(Filterfilter)
 Apply the specified server-side filter when performing the 
Query.
 
 
-
+
 Scan
 setId(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringid)
 This method allows you to set an identifier on an 

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 2b1a8b7..50028a7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class HRegion.RegionScannerImpl
+class HRegion.RegionScannerImpl
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements RegionScanner, RpcCallback
 RegionScannerImpl is used to combine scanners from multiple 
Stores (aka column families).
@@ -429,7 +429,7 @@ implements 
 
 storeHeap
-KeyValueHeap storeHeap
+KeyValueHeap storeHeap
 
 
 
@@ -438,7 +438,7 @@ implements 
 
 joinedHeap
-KeyValueHeap joinedHeap
+KeyValueHeap joinedHeap
 Heap of key-values that are not essential for the provided 
filters and are thus read
  on demand, if on-demand column family loading is enabled.
 
@@ -449,7 +449,7 @@ implements 
 
 joinedContinuationRow
-protectedCell joinedContinuationRow
+protectedCell joinedContinuationRow
 If the joined heap data gathering is interrupted due to 
scan limits, this will
  contain the row for which we are populating the values.
 
@@ -460,7 +460,7 @@ implements 
 
 filterClosed
-privateboolean filterClosed
+privateboolean filterClosed
 
 
 
@@ -469,7 +469,7 @@ implements 
 
 stopRow
-protected finalbyte[] stopRow
+protected finalbyte[] stopRow
 
 
 
@@ -478,7 +478,7 @@ implements 
 
 includeStopRow
-protected finalboolean includeStopRow
+protected finalboolean includeStopRow
 
 
 
@@ -487,7 +487,7 @@ implements 
 
 region
-protected finalHRegion region
+protected finalHRegion region
 
 
 
@@ -496,7 +496,7 @@ implements 
 
 comparator
-protected finalCellComparator comparator
+protected finalCellComparator comparator
 
 
 
@@ -505,7 +505,7 @@ implements 
 
 readPt
-private finallong readPt
+private finallong readPt
 
 
 
@@ -514,7 +514,7 @@ implements 
 
 maxResultSize
-private finallong maxResultSize
+private finallong maxResultSize
 
 
 
@@ -523,7 +523,7 @@ implements 
 
 defaultScannerContext
-private finalScannerContext defaultScannerContext
+private finalScannerContext defaultScannerContext
 
 
 
@@ -532,7 +532,7 @@ implements 
 
 filter
-private finalFilterWrapper filter
+private finalFilterWrapper filter
 
 
 
@@ -549,7 +549,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scanscan,
+RegionScannerImpl(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners,
   HRegionregion)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -565,7 +565,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scanscan,
+RegionScannerImpl(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners,
   HRegionregion,
   longnonceGroup,
@@ -591,7 +591,7 @@ implements 
 
 getRegionInfo
-publicHRegionInfogetRegionInfo()
+publicHRegionInfogetRegionInfo()
 
 Specified by:
 getRegionInfoin
 interfaceRegionScanner
@@ -606,7 +606,7 @@ implements 
 
 initializeScanners
-protectedvoidinitializeScanners(Scanscan,
+protectedvoidinitializeScanners(Scanscan,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -621,7 +621,7 @@ implements 
 
 initializeKVHeap
-protectedvoidinitializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerscanners,
+protectedvoidinitializeKVHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerscanners,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerjoinedScanners,
 HRegionregion)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 

[36/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/Get.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Get.html 
b/devapidocs/org/apache/hadoop/hbase/client/Get.html
index 5f03da1..51e683f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Get.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Get.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":42,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":42,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":42,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":42,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":42,"i35":42,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -373,37 +373,49 @@ implements 
 
 Get
-setACL(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,Permissionperms)
+readAllVersions()
+Get all available versions.
+
 
 
 Get
+readVersions(intversions)
+Get up to the specified number of versions of each 
column.
+
+
+
+Get
+setACL(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,Permissionperms)
+
+
+Get
 setACL(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringuser,
   Permissionperms)
 
-
+
 Get
 setAttribute(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname,
 byte[]value)
 Sets an attribute.
 
 
-
+
 Get
 setAuthorizations(Authorizationsauthorizations)
 Sets the authorizations to be used by this Query
 
 
-
+
 Get
 setCacheBlocks(booleancacheBlocks)
 Set whether blocks should be cached for this Get.
 
 
-
+
 Get
 setCheckExistenceOnly(booleancheckExistenceOnly)
 
-
+
 Get
 setClosestRowBefore(booleanclosestRowBefore)
 Deprecated.
@@ -411,7 +423,7 @@ implements 
 
 
-
+
 Get
 setColumnFamilyTimeRange(byte[]cf,
 longminStamp,
@@ -420,77 +432,83 @@ implements 
 
 
-
+
 Get
 setColumnFamilyTimeRange(byte[]cf,
 TimeRangetr)
 
-
+
 Get
 setConsistency(Consistencyconsistency)
 Sets the consistency level for this operation
 
 
-
+
 Get
 setFilter(Filterfilter)
 Apply the specified server-side filter when performing the 
Query.
 
 
-
+
 Get
 setId(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringid)
 This method allows you to set an identifier on an 
operation.
 
 
-
+
 Get
 setIsolationLevel(IsolationLevellevel)
 Set the isolation level for this query.
 
 
-
+
 Get
 setLoadColumnFamiliesOnDemand(booleanvalue)
 Set the value indicating whether loading CFs on demand 
should be allowed (cluster
  default is false).
 
 
-
+
 Get
 setMaxResultsPerColumnFamily(intlimit)
 Set the maximum number of values to return per row per 
Column Family
 
 
-
+
 Get
 setMaxVersions()
-Get all available versions.
+Deprecated.
+It is easy to 
misunderstand with column family's max versions, so use
+ readAllVersions()
 instead.
+
 
 
-
+
 Get
 setMaxVersions(intmaxVersions)
-Get up to the specified number of versions of each 
column.
+Deprecated.
+It is easy to 
misunderstand with column family's max versions, so use
+ readVersions(int)
 instead.
+
 
 
-
+
 Get
 setPriority(intpriority)
 
-
+
 Get
 setReplicaId(intId)
 Specify region replica id where Query will fetch data 
from.
 
 
-
+
 Get
 setRowOffsetPerColumnFamily(intoffset)
 Set offset for the row per Column Family.
 
 
-
+
 Get
 setTimeRange(longminStamp,
 longmaxStamp)
@@ -498,19 +516,19 @@ implements 
 
 
-
+
 Get
 setTimeRange(TimeRangetr)
 Get versions of columns only within the specified timestamp 
range,
 
 
-
+
 Get
 setTimeStamp(longtimestamp)
 Get versions of columns with the specified timestamp.
 
 
-
+
 

[39/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 665fba8..ce30421 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2017 The Apache Software Foundation
 
   File: 2024,
- Errors: 12787,
+ Errors: 12799,
  Warnings: 0,
  Infos: 0
   
@@ -2715,7 +2715,7 @@ under the License.
   0
 
 
-  3
+  4
 
   
   
@@ -4017,7 +4017,7 @@ under the License.
   0
 
 
-  48
+  49
 
   
   
@@ -13859,7 +13859,7 @@ under the License.
   0
 
 
-  241
+  250
 
   
   
@@ -15189,7 +15189,7 @@ under the License.
   0
 
 
-  3
+  5
 
   
   
@@ -15665,7 +15665,7 @@ under the License.
   0
 
 
-  8
+  9
 
   
   
@@ -18087,7 +18087,7 @@ under the License.
   0
 
 
-  8
+  7
 
   
   
@@ -19641,7 +19641,7 @@ under the License.
   0
 
 
-  18
+  14
 
   
   
@@ -26781,7 +26781,7 @@ under the License.
   0
 
 
-  28
+  31
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/coc.html
--
diff --git a/coc.html b/coc.html
index 1de4cd1..56c6c24 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-10
+  Last Published: 
2017-08-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 243db52..e2d08c0 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-10
+  Last Published: 
2017-08-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 618bd73..6e6752f 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -527,7 +527,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-10
+  Last Published: 
2017-08-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index f310c4a..8bb1fbb 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -722,7 +722,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-08-10
+  Last Published: 
2017-08-11
 
 
 


[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
index 51ab56b..4fd71ab 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable
+public class MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable
 This is the runnable that will be executed on the executor 
every PERIOD number of seconds
@@ -213,7 +213,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 lastRan
-privatelong lastRan
+privatelong lastRan
 
 
 
@@ -222,7 +222,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 lastRequestCount
-privatelong lastRequestCount
+privatelong lastRequestCount
 
 
 
@@ -239,7 +239,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 RegionServerMetricsWrapperRunnable
-publicRegionServerMetricsWrapperRunnable()
+publicRegionServerMetricsWrapperRunnable()
 
 
 
@@ -256,7 +256,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.
 
 
 run
-publicvoidrun()
+publicvoidrun()
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true#run--;
 title="class or interface in java.lang">runin 
interfacehttp://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable



[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
index 355f5ab..22e2f96 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTable.html
@@ -115,601 +115,601 @@
 107  private final TableName tableName;
 108  private final Configuration 
configuration;
 109  private final ConnectionConfiguration 
connConfiguration;
-110  @VisibleForTesting
-111  volatile BufferedMutatorImpl mutator;
-112  private final Object mutatorLock = new 
Object();
-113  private boolean closed = false;
-114  private final int scannerCaching;
-115  private final long 
scannerMaxResultSize;
-116  private final ExecutorService pool;  // 
For Multi  Scan
-117  private int operationTimeout; // global 
timeout for each blocking method with retrying rpc
-118  private final int rpcTimeout; // FIXME 
we should use this for rpc like batch and checkAndXXX
-119  private int readRpcTimeout; // timeout 
for each read rpc request
-120  private int writeRpcTimeout; // timeout 
for each write rpc request
-121  private final boolean 
cleanupPoolOnClose; // shutdown the pool in close()
-122  private final HRegionLocator locator;
-123  private final long writeBufferSize;
-124
-125  /** The Async process for batch */
-126  @VisibleForTesting
-127  AsyncProcess multiAp;
-128  private final RpcRetryingCallerFactory 
rpcCallerFactory;
-129  private final RpcControllerFactory 
rpcControllerFactory;
-130
-131  // Marked Private @since 1.0
-132  @InterfaceAudience.Private
-133  public static ThreadPoolExecutor 
getDefaultExecutor(Configuration conf) {
-134int maxThreads = 
conf.getInt("hbase.htable.threads.max", Integer.MAX_VALUE);
-135if (maxThreads == 0) {
-136  maxThreads = 1; // is there a 
better default?
-137}
-138int corePoolSize = 
conf.getInt("hbase.htable.threads.coresize", 1);
-139long keepAliveTime = 
conf.getLong("hbase.htable.threads.keepalivetime", 60);
-140
-141// Using the "direct handoff" 
approach, new threads will only be created
-142// if it is necessary and will grow 
unbounded. This could be bad but in HCM
-143// we only create as many Runnables 
as there are region servers. It means
-144// it also scales when new region 
servers are added.
-145ThreadPoolExecutor pool = new 
ThreadPoolExecutor(corePoolSize, maxThreads, keepAliveTime,
-146  TimeUnit.SECONDS, new 
SynchronousQueue(), Threads.newDaemonThreadFactory("htable"));
-147pool.allowCoreThreadTimeOut(true);
-148return pool;
-149  }
-150
-151  /**
-152   * Creates an object to access a HBase 
table.
-153   * Used by HBase internally.  DO NOT 
USE. See {@link ConnectionFactory} class comment for how to
-154   * get a {@link Table} instance (use 
{@link Table} instead of {@link HTable}).
-155   * @param connection Connection to be 
used.
-156   * @param builder The table builder
-157   * @param rpcCallerFactory The RPC 
caller factory
-158   * @param rpcControllerFactory The RPC 
controller factory
-159   * @param pool ExecutorService to be 
used.
-160   */
-161  @InterfaceAudience.Private
-162  protected HTable(final 
ClusterConnection connection,
-163  final TableBuilderBase builder,
-164  final RpcRetryingCallerFactory 
rpcCallerFactory,
-165  final RpcControllerFactory 
rpcControllerFactory,
-166  final ExecutorService pool) {
-167if (connection == null || 
connection.isClosed()) {
-168  throw new 
IllegalArgumentException("Connection is null or closed.");
-169}
-170this.connection = connection;
-171this.configuration = 
connection.getConfiguration();
-172this.connConfiguration = 
connection.getConnectionConfiguration();
-173if (pool == null) {
-174  this.pool = 
getDefaultExecutor(this.configuration);
-175  this.cleanupPoolOnClose = true;
-176} else {
-177  this.pool = pool;
-178  this.cleanupPoolOnClose = false;
-179}
-180if (rpcCallerFactory == null) {
-181  this.rpcCallerFactory = 
connection.getNewRpcRetryingCallerFactory(configuration);
-182} else {
-183  this.rpcCallerFactory = 
rpcCallerFactory;
-184}
-185
-186if (rpcControllerFactory == null) {
-187  this.rpcControllerFactory = 
RpcControllerFactory.instantiate(configuration);
-188} else {
-189  this.rpcControllerFactory = 
rpcControllerFactory;
-190}
-191
-192this.tableName = builder.tableName;
-193this.operationTimeout = 
builder.operationTimeout;
-194this.rpcTimeout = 
builder.rpcTimeout;
-195this.readRpcTimeout = 
builder.readRpcTimeout;
-196this.writeRpcTimeout = 
builder.writeRpcTimeout;
-197this.writeBufferSize = 
builder.writeBufferSize;
-198this.scannerCaching = 

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/client/Table.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Table.html 
b/devapidocs/org/apache/hadoop/hbase/client/Table.html
index 201b071..35cba89 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Table.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Table.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":38,"i26":6,"i27":6,"i28":6,"i29":38,"i30":38,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":38,"i39":38,"i40":38,"i41":38,"i42":38};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":38,"i26":6,"i27":6,"i28":6,"i29":38,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":38,"i38":38,"i39":38,"i40":38};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -388,26 +388,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-long
-getWriteBufferSize()
-Deprecated.
-as of 1.0.1 (should not 
have been in 1.0.0). Replaced by BufferedMutator.getWriteBufferSize()
-
-
-
-
 int
 getWriteRpcTimeout()
 Get timeout (millisecond) of each rpc write request in this 
Table instance.
 
 
-
+
 Result
 increment(Incrementincrement)
 Increments one or more columns within a single row.
 
 
-
+
 long
 incrementColumnValue(byte[]row,
 byte[]family,
@@ -416,7 +408,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 See incrementColumnValue(byte[],
 byte[], byte[], long, Durability)
 
 
-
+
 long
 incrementColumnValue(byte[]row,
 byte[]family,
@@ -426,25 +418,25 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 Atomically increments a column value.
 
 
-
+
 void
 mutateRow(RowMutationsrm)
 Performs multiple mutations atomically on a single 
row.
 
 
-
+
 void
 put(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPutputs)
 Puts some data in the table, in batch.
 
 
-
+
 void
 put(Putput)
 Puts some data in the table.
 
 
-
+
 void
 setOperationTimeout(intoperationTimeout)
 Deprecated.
@@ -452,7 +444,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-
+
 void
 setReadRpcTimeout(intreadRpcTimeout)
 Deprecated.
@@ -460,7 +452,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-
+
 void
 setRpcTimeout(intrpcTimeout)
 Deprecated.
@@ -468,16 +460,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 
-
-void
-setWriteBufferSize(longwriteBufferSize)
-Deprecated.
-as of 1.0.1 (should not 
have been in 1.0.0). Replaced by BufferedMutator and
- BufferedMutatorParams.writeBufferSize(long)
-
-
-
-
+
 void
 setWriteRpcTimeout(intwriteRpcTimeout)
 Deprecated.
@@ -800,19 +783,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 put
-voidput(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPutputs)
+voidput(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPutputs)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Puts some data in the table, in batch.
  
- This can be used for group commit, or for submitting user defined
- batches.  The writeBuffer will be periodically inspected while the List
- is processed, so depending on the List size the writeBuffer may flush
- not at all, or more than once.
+ This can be used for group commit, or for submitting user defined 
batches.
 
 Parameters:
-puts - The list of mutations to apply. The batch put is done 
by
- aggregating the iteration of the Puts over the write buffer
- at the client-side for a single RPC call.
+puts - The list of mutations to apply.
 Throws:
 http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException - if a remote or 
network exception occurs.
 Since:
@@ -826,7 +804,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
 
 
 checkAndPut
-booleancheckAndPut(byte[]row,
+booleancheckAndPut(byte[]row,
 byte[]family,
 byte[]qualifier,
 byte[]value,
@@ 

[28/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
index b3d18d1..1c9f960 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
@@ -196,7 +196,7 @@
 
 UnassignProcedure
 
-Procedure that describe the unassignment of a single 
region.
+Procedure that describes the unassignment of a single 
region.
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/master/assignment/package-use.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-use.html
index e94e8e4..12d4817 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-use.html
@@ -205,7 +205,7 @@
 
 
 UnassignProcedure
-Procedure that describe the unassignment of a single 
region.
+Procedure that describes the unassignment of a single 
region.
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
index 9b1d336..86c040e 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
@@ -312,11 +312,11 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
-org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
+org.apache.hadoop.hbase.master.RegionState.State
 org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode
+org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
+org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
 org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.master.RegionState.State
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/master/procedure/ServerCrashException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/ServerCrashException.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/ServerCrashException.html
index 066581d..89087f7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/ServerCrashException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/ServerCrashException.html
@@ -134,10 +134,11 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ServerCrashException
+public class ServerCrashException
 extends HBaseIOException
 Passed as Exception by ServerCrashProcedure
- notifying on-going RIT that server has failed.
+ notifying on-going RIT that server has failed. This exception is less an 
error-condition than
+ it is a signal to waiting procedures that they can now proceed.
 
 See Also:
 Serialized
 Form
@@ -240,7 +241,7 @@ extends 
 
 procId
-private finallong procId
+private finallong procId
 
 
 
@@ -249,7 +250,7 @@ extends 
 
 serverName
-private finalServerName serverName
+private finalServerName serverName
 
 
 
@@ -266,7 +267,7 @@ extends 
 
 ServerCrashException
-publicServerCrashException(longprocId,
+publicServerCrashException(longprocId,
 ServerNameserverName)
 
 Parameters:
@@ -288,7 +289,7 @@ extends 
 
 getMessage
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetMessage()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetMessage()
 
 Overrides:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getMessage--;
 title="class or interface in java.lang">getMessagein 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignProcedure.html
index 7d33235..4e2ac53 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignProcedure.html
@@ -35,324 +35,335 @@
 027import 
org.apache.commons.logging.LogFactory;
 028import 
org.apache.hadoop.hbase.HRegionInfo;
 029import 
org.apache.hadoop.hbase.ServerName;
-030import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-031import 
org.apache.hadoop.hbase.client.RetriesExhaustedException;
-032import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-033import 
org.apache.hadoop.hbase.master.RegionState.State;
-034import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-035import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-036import 
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionOpenOperation;
-037import 
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-038import 
org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
-039import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
-040import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-041import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData;
-042import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
-043import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-044
-045/**
-046 * Procedure that describe the assignment 
of a single region.
-047 * There can only be one 
RegionTransitionProcedure per region running at a time
-048 * since each procedure takes a lock on 
the region.
-049 *
-050 * pThe Assign starts by pushing 
the "assign" operation to the AssignmentManager
-051 * and then will go in a "waiting" 
state.
-052 * The AM will batch the "assign" 
requests and ask the Balancer where to put
-053 * the region (the various policies will 
be respected: retain, round-robin, random).
-054 * Once the AM and the balancer have 
found a place for the region the procedure
-055 * will be resumed and an "open region" 
request will be placed in the Remote Dispatcher
-056 * queue, and the procedure once again 
will go in a "waiting state".
-057 * The Remote Dispatcher will batch the 
various requests for that server and
-058 * they will be sent to the RS for 
execution.
-059 * The RS will complete the open 
operation by calling master.reportRegionStateTransition().
-060 * The AM will intercept the transition 
report, and notify the procedure.
-061 * The procedure will finish the 
assignment by publishing to new state on meta
-062 * or it will retry the assignment.
-063 *
-064 * pThis procedure does not 
rollback when beyond the first
-065 * REGION_TRANSITION_QUEUE step; it will 
press on trying to assign in the face of
-066 * failure. Should we ignore rollback 
calls to Assign/Unassign then? Or just
-067 * remove rollback here?
-068 */
-069@InterfaceAudience.Private
-070public class AssignProcedure extends 
RegionTransitionProcedure {
-071  private static final Log LOG = 
LogFactory.getLog(AssignProcedure.class);
-072
-073  private boolean forceNewPlan = false;
-074
-075  /**
-076   * Gets set as desired target on move, 
merge, etc., when we want to go to a particular server.
-077   * We may not be able to respect this 
request but will try. When it is NOT set, then we ask
-078   * the balancer to assign. This value 
is used below in startTransition to set regionLocation if
-079   * non-null. Setting regionLocation in 
regionServerNode is how we override balancer setting
-080   * destination.
-081   */
-082  protected volatile ServerName 
targetServer;
-083
-084  public AssignProcedure() {
-085// Required by the Procedure 
framework to create the procedure on replay
-086super();
-087  }
-088
-089  public AssignProcedure(final 
HRegionInfo regionInfo) {
-090this(regionInfo, false);
-091  }
-092
-093  public AssignProcedure(final 
HRegionInfo regionInfo, final boolean forceNewPlan) {
-094super(regionInfo);
-095this.forceNewPlan = forceNewPlan;
-096this.targetServer = null;
-097  }
-098
-099  public AssignProcedure(final 
HRegionInfo regionInfo, final ServerName destinationServer) {
-100super(regionInfo);
-101this.forceNewPlan = false;
-102this.targetServer = 
destinationServer;
-103  }
-104
-105  @Override
-106  public TableOperationType 
getTableOperationType() {
-107return 
TableOperationType.REGION_ASSIGN;
-108  }
-109

[18/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index cbc2ae9..9f4d04d 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -692,20 +692,20 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.regionserver.MemStoreCompactor.Action
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
 org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
-org.apache.hadoop.hbase.regionserver.BloomType
 org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
 org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 org.apache.hadoop.hbase.regionserver.ScanType
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.MemStoreCompactor.Action
 org.apache.hadoop.hbase.regionserver.RegionOpeningState
+org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
+org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
+org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.html
index e8361b7..f1461a6 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.html
@@ -380,7 +380,7 @@ extends Specified by:
 beforeShippedin
 interfaceShipperListener
 Overrides:
-beforeShippedin
 classScanQueryMatcher
+beforeShippedin
 classUserScanQueryMatcher
 Throws:
 http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.html
index cb4af05..307e792 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.html
@@ -242,14 +242,14 @@ extends UserScanQueryMatcher
-create,
 getFilter,
 getNextKeyHint,
 hasNullColumnInQuery,
 isUserScan,
 matchColumn, moreRowsMayExistAfter,
 moreRowsMayExistsAfter
+beforeShipped,
 create,
 getFilter,
 getNextKeyHint,
 hasNullColumnInQuery,
 isUserScan, matchColumn,
 moreRowsMayExistAfter,
 moreRowsMayExistsAfter
 
 
 
 
 
 Methods inherited from 
classorg.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher
-beforeShipped,
 checkColumn,
 checkDeleted,
 clearCurrentRow,
 compareKeyForNextColumn,
 compareKeyForNextRow,
 createStartKeyFromRow,
 currentRow,
 getKeyForNextColumn,
 getStartKey,
 getTrackers,
 preCheck,
 setToNewRow
+checkColumn,
 checkDeleted,
 clearCurrentRow,
 compareKeyForNextColumn,
 compareK
 eyForNextRow, createStartKeyFromRow,
 currentRow,
 getKeyForNextColumn,
 getStartKey,
 getTrackers,
 preCheck,
 

[17/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html 
b/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
index bbbca2d..f9f2d7e 100644
--- a/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
+++ b/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":42,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":42,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":42,"i50":10,"i51":10,"i52":9};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":42,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":42,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":42,"i49":10,"i50":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -507,24 +507,18 @@ implements getTableName()
 
 
-long
-getWriteBufferSize()
-Returns the maximum size in bytes of the write buffer for 
this HTable.
-
-
-
 int
 getWriteRpcTimeout()
 Get timeout (millisecond) of each rpc write request in this 
Table instance.
 
 
-
+
 Result
 increment(Incrementincrement)
 Increments one or more columns within a single row.
 
 
-
+
 long
 incrementColumnValue(byte[]row,
 byte[]family,
@@ -533,7 +527,7 @@ implements See Table.incrementColumnValue(byte[],
 byte[], byte[], long, Durability)
 
 
-
+
 long
 incrementColumnValue(byte[]row,
 byte[]family,
@@ -543,62 +537,56 @@ implements Atomically increments a column value.
 
 
-
+
 boolean
 isAutoFlush()
 
-
+
 void
 mutateRow(RowMutationsrm)
 Performs multiple mutations atomically on a single 
row.
 
 
-
+
 void
 put(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPutputs)
 Puts some data in the table, in batch.
 
 
-
+
 void
 put(Putput)
 Puts some data in the table.
 
 
-
+
 void
 setOperationTimeout(intoperationTimeout)
 Set timeout (millisecond) of each operation in this Table 
instance, will override the value
  of hbase.client.operation.timeout in configuration.
 
 
-
+
 void
 setReadRpcTimeout(intreadRpcTimeout)
 Set timeout (millisecond) of each rpc read request in 
operations of this Table instance, will
  override the value of hbase.rpc.read.timeout in configuration.
 
 
-
+
 void
 setRpcTimeout(intrpcTimeout)
 Deprecated.
 
 
-
-void
-setWriteBufferSize(longwriteBufferSize)
-Sets the size of the buffer in bytes.
-
-
-
+
 void
 setWriteRpcTimeout(intwriteRpcTimeout)
 Set timeout (millisecond) of each rpc write request in 
operations of this Table instance, will
  override the value of hbase.rpc.write.timeout in configuration.
 
 
-
+
 private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toURLEncodedBytes(byte[]row)
 
@@ -1013,17 +1001,12 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.
 Description copied from 
interface:Table
 Puts some data in the table, in batch.
  
- This can be used for group commit, or for submitting user defined
- batches.  The writeBuffer will be periodically inspected while the List
- is processed, so depending on the List size the writeBuffer may flush
- not at all, or more than once.
+ This can be used for group commit, or for submitting user defined 
batches.
 
 Specified by:
 putin
 interfaceTable
 Parameters:
-puts - The list of mutations to apply. The batch put is done 
by
- aggregating the iteration of the Puts over the write buffer
- at the client-side for a single RPC call.
+puts - The list of mutations to apply.
 Throws:
 http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException - if a remote or 
network exception occurs.
 
@@ -1643,49 +1626,6 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.
 
 
 
-
-
-
-
-
-getWriteBufferSize
-publiclonggetWriteBufferSize()
-Description copied from 
interface:Table
-Returns the maximum size in bytes of the write buffer for 

[41/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/src-html/org/apache/hadoop/hbase/client/TableBuilder.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/TableBuilder.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/TableBuilder.html
index 8c639fc..5188ab8 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/TableBuilder.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/TableBuilder.html
@@ -65,16 +65,10 @@
 057  TableBuilder setWriteRpcTimeout(int 
timeout);
 058
 059  /**
-060   * Set the write buffer size which by 
default is specified by the
-061   * {@code hbase.client.write.buffer} 
setting.
-062   */
-063  TableBuilder setWriteBufferSize(long 
writeBufferSize);
-064
-065  /**
-066   * Create the {@link Table} instance.
-067   */
-068  Table build();
-069}
+060   * Create the {@link Table} instance.
+061   */
+062  Table build();
+063}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/apidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html 
b/apidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
index 4e1f2ed..1067d9a 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
@@ -830,92 +830,82 @@
 822  }
 823
 824  @Override
-825  public long getWriteBufferSize() {
-826throw new 
UnsupportedOperationException("getWriteBufferSize not implemented");
-827  }
-828
-829  @Override
-830  public void setWriteBufferSize(long 
writeBufferSize) throws IOException {
-831throw new 
IOException("setWriteBufferSize not supported");
-832  }
-833
-834  @Override
-835  public R extends Message 
Mapbyte[], R batchCoprocessorService(
-836  Descriptors.MethodDescriptor 
method, Message request,
-837  byte[] startKey, byte[] endKey, R 
responsePrototype) throws ServiceException, Throwable {
-838throw new 
UnsupportedOperationException("batchCoprocessorService not implemented");
-839  }
-840
-841  @Override
-842  public R extends Message void 
batchCoprocessorService(
-843  Descriptors.MethodDescriptor 
method, Message request,
-844  byte[] startKey, byte[] endKey, R 
responsePrototype, CallbackR callback)
-845  throws ServiceException, Throwable 
{
-846throw new 
UnsupportedOperationException("batchCoprocessorService not implemented");
+825  public R extends Message 
Mapbyte[], R batchCoprocessorService(
+826  Descriptors.MethodDescriptor 
method, Message request,
+827  byte[] startKey, byte[] endKey, R 
responsePrototype) throws ServiceException, Throwable {
+828throw new 
UnsupportedOperationException("batchCoprocessorService not implemented");
+829  }
+830
+831  @Override
+832  public R extends Message void 
batchCoprocessorService(
+833  Descriptors.MethodDescriptor 
method, Message request,
+834  byte[] startKey, byte[] endKey, R 
responsePrototype, CallbackR callback)
+835  throws ServiceException, Throwable 
{
+836throw new 
UnsupportedOperationException("batchCoprocessorService not implemented");
+837  }
+838
+839  @Override public boolean 
checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
+840  CompareOp compareOp, byte[] value, 
RowMutations rm) throws IOException {
+841throw new 
UnsupportedOperationException("checkAndMutate not implemented");
+842  }
+843
+844  @Override
+845  public void setOperationTimeout(int 
operationTimeout) {
+846throw new 
UnsupportedOperationException();
 847  }
 848
-849  @Override public boolean 
checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
-850  CompareOp compareOp, byte[] value, 
RowMutations rm) throws IOException {
-851throw new 
UnsupportedOperationException("checkAndMutate not implemented");
+849  @Override
+850  public int getOperationTimeout() {
+851throw new 
UnsupportedOperationException();
 852  }
 853
 854  @Override
-855  public void setOperationTimeout(int 
operationTimeout) {
-856throw new 
UnsupportedOperationException();
-857  }
-858
-859  @Override
-860  public int getOperationTimeout() {
-861throw new 
UnsupportedOperationException();
-862  }
-863
-864  @Override
-865  @Deprecated
-866  public void setRpcTimeout(int 
rpcTimeout) {
-867throw new 
UnsupportedOperationException();
-868  }
-869
-870  @Override
-871  @Deprecated
-872  public int getRpcTimeout() {
+855  @Deprecated
+856  public void setRpcTimeout(int 
rpcTimeout) {
+857throw new 
UnsupportedOperationException();
+858  }
+859
+860  @Override
+861  @Deprecated
+862  public int getRpcTimeout() {
+863throw new 
UnsupportedOperationException();
+864  }
+865
+866  @Override
+867  public int getReadRpcTimeout() {
+868throw new 

hbase git commit: HBASE-18558 clean up duplicate dependency management entries for hbase-shaded-miscellaneous

2017-08-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 6f44b2486 -> 043ec9b37


HBASE-18558 clean up duplicate dependency management entries for 
hbase-shaded-miscellaneous


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/043ec9b3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/043ec9b3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/043ec9b3

Branch: refs/heads/master
Commit: 043ec9b37e43328e8784f88e3d6867b007a31d1d
Parents: 6f44b24
Author: Michael Stack 
Authored: Fri Aug 11 07:24:17 2017 -0700
Committer: Michael Stack 
Committed: Fri Aug 11 07:24:17 2017 -0700

--
 pom.xml | 5 -
 1 file changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/043ec9b3/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 7648e8e..7925e4e 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1718,11 +1718,6 @@
 ${metrics-core.version}
   
   
-org.apache.hbase.thirdparty
-hbase-shaded-miscellaneous
-${hbase-thirdparty.version}
-  
-  
 commons-collections
 commons-collections
 ${collections.version}



hbase git commit: HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers

2017-08-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 0c16bb591 -> 5940f4224


HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers

If an unassign is unable to communicate with its target server,
expire the server and then wait on a signal from ServerCrashProcedure
before proceeding. The unassign holds a lock on the region so no one else
can proceed until we complete. We prevent any subsequent assign from
running until logs have been split for the crashed server.

In AssignProcedure, do not assign if table is DISABLING or DISABLED.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 Change remoteCallFailed so it returns a boolean indicating whether the
implementor wants to stay suspended.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
  Doc. Also, if we are unable to talk to the remote server, expire it and
then wait on SCP to wake us up after it has processed logs for the failed
server.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5940f422
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5940f422
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5940f422

Branch: refs/heads/branch-2
Commit: 5940f4224c0ce0c01e98cdb28f74c6e227c918e3
Parents: 0c16bb5
Author: Michael Stack 
Authored: Thu Aug 10 14:22:56 2017 -0700
Committer: Michael Stack 
Committed: Fri Aug 11 07:17:26 2017 -0700

--
 .../hbase/procedure2/ProcedureExecutor.java | 10 +--
 .../hadoop/hbase/master/MasterRpcServices.java  |  2 +-
 .../hbase/master/TableNamespaceManager.java |  2 +-
 .../hadoop/hbase/master/TableStateManager.java  |  1 +
 .../master/assignment/AssignProcedure.java  | 13 +++-
 .../assignment/RegionTransitionProcedure.java   | 44 ++--
 .../master/assignment/UnassignProcedure.java| 70 ++--
 .../master/procedure/DisableTableProcedure.java |  4 +-
 .../master/procedure/RSProcedureDispatcher.java |  2 +-
 .../master/procedure/ServerCrashException.java  |  3 +-
 .../master/procedure/ServerCrashProcedure.java  |  3 +-
 .../TestSplitTransactionOnCluster.java  | 17 +++--
 12 files changed, 100 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5940f422/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index c110c2d..d0052f6 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -315,7 +315,7 @@ public class ProcedureExecutor {
   @Override
   public void setMaxProcId(long maxProcId) {
 assert lastProcId.get() < 0 : "expected only one call to 
setMaxProcId()";
-LOG.debug("Load maxProcId=" + maxProcId);
+LOG.debug("Load max pid=" + maxProcId);
 lastProcId.set(maxProcId);
   }
 
@@ -727,7 +727,7 @@ public class ProcedureExecutor {
!(procedures.containsKey(oldProcId) || 
completed.containsKey(oldProcId)) &&
nonceKeysToProcIdsMap.containsKey(nonceKey)) {
   if (traceEnabled) {
-LOG.trace("Waiting for procId=" + oldProcId.longValue() + " to be 
submitted");
+LOG.trace("Waiting for pid=" + oldProcId.longValue() + " to be 
submitted");
   }
   Threads.sleep(100);
 }
@@ -999,9 +999,9 @@ public class ProcedureExecutor {
   public void removeResult(final long procId) {
 CompletedProcedureRetainer retainer = completed.get(procId);
 if (retainer == null) {
-  assert !procedures.containsKey(procId) : "procId=" + procId + " is still 
running";
+  assert !procedures.containsKey(procId) : "pid=" + procId + " is still 
running";
   if (LOG.isDebugEnabled()) {
-LOG.debug("procId=" + procId + " already removed by the cleaner.");
+LOG.debug("pid=" + procId + " already removed by the cleaner.");
   }
   return;
 }
@@ -1349,7 +1349,7 @@ public class ProcedureExecutor {
   return LockState.LOCK_YIELD_WAIT;
 } catch (Throwable e) {
   // Catch NullPointerExceptions or similar errors...
-  LOG.fatal("CODE-BUG: Uncaught runtime exception fo " + proc, e);
+  LOG.fatal("CODE-BUG: Uncaught runtime exception for " + proc, e);
 }
 
 // allows to kill the executor before something is stored to the wal.


hbase git commit: HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers

2017-08-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master cabdbf181 -> 6f44b2486


HBASE-18551 [AMv2] UnassignProcedure and crashed regionservers

If an unassign is unable to communicate with its target server,
expire the server and then wait on a signal from ServerCrashProcedure
before proceeding. The unassign holds a lock on the region so no one else
can proceed until we complete. We prevent any subsequent assign from
running until logs have been split for the crashed server.

In AssignProcedure, do not assign if table is DISABLING or DISABLED.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 Change remoteCallFailed so it returns a boolean indicating whether the
implementor wants to stay suspended.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
  Doc. Also, if we are unable to talk to the remote server, expire it and
then wait on SCP to wake us up after it has processed logs for the failed
server.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6f44b248
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6f44b248
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6f44b248

Branch: refs/heads/master
Commit: 6f44b24860192d81dbf88ffd834d4b998a6fe636
Parents: cabdbf1
Author: Michael Stack 
Authored: Thu Aug 10 14:22:56 2017 -0700
Committer: Michael Stack 
Committed: Fri Aug 11 07:16:33 2017 -0700

--
 .../hbase/procedure2/ProcedureExecutor.java | 10 +--
 .../hadoop/hbase/master/MasterRpcServices.java  |  2 +-
 .../hbase/master/TableNamespaceManager.java |  2 +-
 .../hadoop/hbase/master/TableStateManager.java  |  1 +
 .../master/assignment/AssignProcedure.java  | 13 +++-
 .../assignment/RegionTransitionProcedure.java   | 44 ++--
 .../master/assignment/UnassignProcedure.java| 70 ++--
 .../master/procedure/DisableTableProcedure.java |  4 +-
 .../master/procedure/RSProcedureDispatcher.java |  2 +-
 .../master/procedure/ServerCrashException.java  |  3 +-
 .../master/procedure/ServerCrashProcedure.java  |  3 +-
 .../TestSplitTransactionOnCluster.java  | 17 +++--
 12 files changed, 100 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6f44b248/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index c110c2d..d0052f6 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -315,7 +315,7 @@ public class ProcedureExecutor {
   @Override
   public void setMaxProcId(long maxProcId) {
 assert lastProcId.get() < 0 : "expected only one call to 
setMaxProcId()";
-LOG.debug("Load maxProcId=" + maxProcId);
+LOG.debug("Load max pid=" + maxProcId);
 lastProcId.set(maxProcId);
   }
 
@@ -727,7 +727,7 @@ public class ProcedureExecutor {
!(procedures.containsKey(oldProcId) || 
completed.containsKey(oldProcId)) &&
nonceKeysToProcIdsMap.containsKey(nonceKey)) {
   if (traceEnabled) {
-LOG.trace("Waiting for procId=" + oldProcId.longValue() + " to be 
submitted");
+LOG.trace("Waiting for pid=" + oldProcId.longValue() + " to be 
submitted");
   }
   Threads.sleep(100);
 }
@@ -999,9 +999,9 @@ public class ProcedureExecutor {
   public void removeResult(final long procId) {
 CompletedProcedureRetainer retainer = completed.get(procId);
 if (retainer == null) {
-  assert !procedures.containsKey(procId) : "procId=" + procId + " is still 
running";
+  assert !procedures.containsKey(procId) : "pid=" + procId + " is still 
running";
   if (LOG.isDebugEnabled()) {
-LOG.debug("procId=" + procId + " already removed by the cleaner.");
+LOG.debug("pid=" + procId + " already removed by the cleaner.");
   }
   return;
 }
@@ -1349,7 +1349,7 @@ public class ProcedureExecutor {
   return LockState.LOCK_YIELD_WAIT;
 } catch (Throwable e) {
   // Catch NullPointerExceptions or similar errors...
-  LOG.fatal("CODE-BUG: Uncaught runtime exception fo " + proc, e);
+  LOG.fatal("CODE-BUG: Uncaught runtime exception for " + proc, e);
 }
 
 // allows to kill the executor before something is stored to the wal.


hbase git commit: HBASE-18469 Correct RegionServer metric of totalRequestCount

2017-08-11 Thread liyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 d72ba890d -> a03c2036e


HBASE-18469 Correct RegionServer metric of totalRequestCount


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a03c2036
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a03c2036
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a03c2036

Branch: refs/heads/branch-1
Commit: a03c2036e9433800db7a9d6cad632a210f5ede35
Parents: d72ba89
Author: Yu Li 
Authored: Fri Aug 11 14:13:18 2017 +0800
Committer: Yu Li 
Committed: Fri Aug 11 14:53:23 2017 +0800

--
 .../regionserver/MetricsRegionServerSource.java |  3 ++
 .../MetricsRegionServerWrapper.java |  2 +
 .../MetricsRegionServerSourceImpl.java  |  2 +
 .../MetricsRegionServerWrapperImpl.java |  5 +++
 .../hbase/regionserver/RSRpcServices.java   | 15 +--
 .../MetricsRegionServerWrapperStub.java |  5 +++
 .../regionserver/TestMetricsRegionServer.java   |  4 ++
 .../regionserver/TestRegionServerMetrics.java   | 41 ++--
 8 files changed, 71 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a03c2036/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 06bdee6..3ac678e 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -244,6 +244,9 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String TOTAL_REQUEST_COUNT = "totalRequestCount";
   String TOTAL_REQUEST_COUNT_DESC =
   "Total number of requests this RegionServer has answered.";
+  String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount";
+  String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC =
+  "Total number of region requests this RegionServer has answered, count 
by row-level action";
   String READ_REQUEST_COUNT = "readRequestCount";
   String READ_REQUEST_COUNT_DESC =
   "Number of read requests this region server has answered.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/a03c2036/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 7232063..7d7f66d 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -367,4 +367,6 @@ public interface MetricsRegionServerWrapper {
   long getDeleteFamilyBloomHitCount();
 
   long getTrailerHitCount();
+
+  long getTotalRowActionRequestCount();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a03c2036/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index 9b7f80a..cec122b 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -354,6 +354,8 @@ public class MetricsRegionServerSourceImpl
   .addGauge(Interns.info(AVERAGE_REGION_SIZE, 
AVERAGE_REGION_SIZE_DESC), rsWrap.getAverageRegionSize())
   .addCounter(Interns.info(TOTAL_REQUEST_COUNT, 
TOTAL_REQUEST_COUNT_DESC),
   rsWrap.getTotalRequestCount())
+  .addCounter(Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, 
TOTAL_ROW_ACTION_REQUEST_COUNT_DESC),
+  rsWrap.getTotalRowActionRequestCount())
   .addCounter(Interns.info(READ_REQUEST_COUNT, 
READ_REQUEST_COUNT_DESC),
   rsWrap.getReadRequestsCount())
   .addCounter(Interns.info(WRITE_REQUEST_COUNT, 
WRITE_REQUEST_COUNT_DESC),


hbase git commit: HBASE-18500 Performance issue: Don't use BufferedMutator for HTable's put method

2017-08-11 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2 56a4fedda -> 0c16bb591


HBASE-18500 Performance issue: Don't use BufferedMutator for HTable's put method


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0c16bb59
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0c16bb59
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0c16bb59

Branch: refs/heads/branch-2
Commit: 0c16bb591b165050439cf945aca19fa708e0b350
Parents: 56a4fed
Author: Guanghao Zhang 
Authored: Wed Aug 2 13:52:16 2017 +0800
Committer: Guanghao Zhang 
Committed: Fri Aug 11 16:38:08 2017 +0800

--
 .../hbase/client/BufferedMutatorImpl.java   |  19 ---
 .../org/apache/hadoop/hbase/client/HTable.java  | 126 ---
 .../org/apache/hadoop/hbase/client/Table.java   |  33 +
 .../hadoop/hbase/client/TableBuilder.java   |   6 -
 .../hadoop/hbase/client/TableBuilderBase.java   |   9 --
 .../hadoop/hbase/client/TestAsyncProcess.java   |  71 +--
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  10 --
 .../hadoop/hbase/client/HTableWrapper.java  |  10 --
 .../security/access/AccessControlLists.java |  16 ++-
 .../hadoop/hbase/PerformanceEvaluation.java |  13 +-
 .../hadoop/hbase/client/TestClientPushback.java |   6 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   5 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java |   3 +-
 .../hadoop/hbase/client/TestMultiParallel.java  |   1 -
 .../hbase/client/TestServerBusyException.java   |   8 +-
 .../hadoop/hbase/constraint/TestConstraint.java |   9 +-
 .../hbase/coprocessor/TestHTableWrapper.java|   8 --
 .../hbase/regionserver/RegionAsTable.java   |  10 --
 .../replication/TestMasterReplication.java  |   1 -
 .../replication/TestMultiSlaveReplication.java  |   3 -
 .../hbase/replication/TestReplicationBase.java  |   1 -
 .../replication/TestReplicationSmallTests.java  |   1 -
 .../replication/TestReplicationSyncUpTool.java  |   4 -
 .../hbase/security/access/SecureTestUtil.java   |   2 +-
 .../security/access/TestNamespaceCommands.java  |   1 +
 25 files changed, 69 insertions(+), 307 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c16bb59/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index 0ddc159..b7d3104 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -305,25 +305,6 @@ public class BufferedMutatorImpl implements 
BufferedMutator {
   }
 };
   }
-  /**
-   * This is used for legacy purposes in {@link 
HTable#setWriteBufferSize(long)} only. This ought
-   * not be called for production uses.
-   * If the new buffer size is smaller than the stored data, the {@link 
BufferedMutatorImpl#flush()}
-   * will be called.
-   * @param writeBufferSize The max size of internal buffer where data is 
stored.
-   * @throws 
org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException
-   * if an I/O error occurs and there are too many retries.
-   * @throws java.io.InterruptedIOException if the I/O task is interrupted.
-   * @deprecated Going away when we drop public support for {@link HTable}.
-   */
-  @Deprecated
-  public void setWriteBufferSize(long writeBufferSize) throws 
RetriesExhaustedWithDetailsException,
-  InterruptedIOException {
-this.writeBufferSize = writeBufferSize;
-if (currentWriteBufferSize.get() > writeBufferSize) {
-  flush();
-}
-  }
 
   /**
* {@inheritDoc}

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c16bb59/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index c0d321b..2920281 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -107,9 +107,6 @@ public class HTable implements Table {
   private final TableName tableName;
   private final Configuration configuration;
   private final ConnectionConfiguration connConfiguration;
-  @VisibleForTesting
-  volatile BufferedMutatorImpl mutator;
-  private final Object mutatorLock = new Object();
   private boolean closed = false;
   private final int scannerCaching;
   private final long 

hbase git commit: HBASE-18500 Performance issue: Don't use BufferedMutator for HTable's put method

2017-08-11 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master 679f34e88 -> cabdbf181


HBASE-18500 Performance issue: Don't use BufferedMutator for HTable's put method


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cabdbf18
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cabdbf18
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cabdbf18

Branch: refs/heads/master
Commit: cabdbf181a0b44bd1f9c32aa67b8a2ee3b863758
Parents: 679f34e
Author: Guanghao Zhang 
Authored: Wed Aug 2 13:52:16 2017 +0800
Committer: Guanghao Zhang 
Committed: Fri Aug 11 16:33:35 2017 +0800

--
 .../hbase/client/BufferedMutatorImpl.java   |  19 ---
 .../org/apache/hadoop/hbase/client/HTable.java  | 126 ---
 .../org/apache/hadoop/hbase/client/Table.java   |  33 +
 .../hadoop/hbase/client/TableBuilder.java   |   6 -
 .../hadoop/hbase/client/TableBuilderBase.java   |   9 --
 .../hadoop/hbase/client/TestAsyncProcess.java   |  71 +--
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  10 --
 .../hadoop/hbase/client/HTableWrapper.java  |  10 --
 .../security/access/AccessControlLists.java |  16 ++-
 .../hadoop/hbase/PerformanceEvaluation.java |  13 +-
 .../hadoop/hbase/client/TestClientPushback.java |   6 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   5 +-
 .../org/apache/hadoop/hbase/client/TestHCM.java |   3 +-
 .../hadoop/hbase/client/TestMultiParallel.java  |   1 -
 .../hbase/client/TestServerBusyException.java   |   8 +-
 .../hadoop/hbase/constraint/TestConstraint.java |   9 +-
 .../hbase/coprocessor/TestHTableWrapper.java|   8 --
 .../hbase/regionserver/RegionAsTable.java   |  10 --
 .../replication/TestMasterReplication.java  |   1 -
 .../replication/TestMultiSlaveReplication.java  |   3 -
 .../hbase/replication/TestReplicationBase.java  |   1 -
 .../replication/TestReplicationSmallTests.java  |   1 -
 .../replication/TestReplicationSyncUpTool.java  |   4 -
 .../hbase/security/access/SecureTestUtil.java   |   2 +-
 .../security/access/TestNamespaceCommands.java  |   1 +
 25 files changed, 69 insertions(+), 307 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cabdbf18/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index 0ddc159..b7d3104 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -305,25 +305,6 @@ public class BufferedMutatorImpl implements 
BufferedMutator {
   }
 };
   }
-  /**
-   * This is used for legacy purposes in {@link 
HTable#setWriteBufferSize(long)} only. This ought
-   * not be called for production uses.
-   * If the new buffer size is smaller than the stored data, the {@link 
BufferedMutatorImpl#flush()}
-   * will be called.
-   * @param writeBufferSize The max size of internal buffer where data is 
stored.
-   * @throws 
org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException
-   * if an I/O error occurs and there are too many retries.
-   * @throws java.io.InterruptedIOException if the I/O task is interrupted.
-   * @deprecated Going away when we drop public support for {@link HTable}.
-   */
-  @Deprecated
-  public void setWriteBufferSize(long writeBufferSize) throws 
RetriesExhaustedWithDetailsException,
-  InterruptedIOException {
-this.writeBufferSize = writeBufferSize;
-if (currentWriteBufferSize.get() > writeBufferSize) {
-  flush();
-}
-  }
 
   /**
* {@inheritDoc}

http://git-wip-us.apache.org/repos/asf/hbase/blob/cabdbf18/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index c0d321b..2920281 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -107,9 +107,6 @@ public class HTable implements Table {
   private final TableName tableName;
   private final Configuration configuration;
   private final ConnectionConfiguration connConfiguration;
-  @VisibleForTesting
-  volatile BufferedMutatorImpl mutator;
-  private final Object mutatorLock = new Object();
   private boolean closed = false;
   private final int scannerCaching;
   private final long 

hbase git commit: HBASE-18469 Correct RegionServer metric of totalRequestCount

2017-08-11 Thread liyu
Repository: hbase
Updated Branches:
  refs/heads/master 8da77b414 -> 679f34e88


HBASE-18469 Correct RegionServer metric of totalRequestCount


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/679f34e8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/679f34e8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/679f34e8

Branch: refs/heads/master
Commit: 679f34e88108d870b79f3175b195e8e7d803c331
Parents: 8da77b4
Author: Yu Li 
Authored: Fri Aug 11 14:13:18 2017 +0800
Committer: Yu Li 
Committed: Fri Aug 11 14:13:18 2017 +0800

--
 .../regionserver/MetricsRegionServerSource.java |  3 ++
 .../MetricsRegionServerWrapper.java |  2 +
 .../MetricsRegionServerSourceImpl.java  |  2 +
 .../MetricsRegionServerWrapperImpl.java |  5 +++
 .../hbase/regionserver/RSRpcServices.java   | 14 ++-
 .../MetricsRegionServerWrapperStub.java |  5 +++
 .../regionserver/TestMetricsRegionServer.java   |  4 ++
 .../regionserver/TestRegionServerMetrics.java   | 40 +++-
 8 files changed, 71 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/679f34e8/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index df522d3..9656894 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -244,6 +244,9 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String TOTAL_REQUEST_COUNT = "totalRequestCount";
   String TOTAL_REQUEST_COUNT_DESC =
   "Total number of requests this RegionServer has answered.";
+  String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount";
+  String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC =
+  "Total number of region requests this RegionServer has answered, count 
by row-level action";
   String READ_REQUEST_COUNT = "readRequestCount";
   String READ_REQUEST_COUNT_DESC =
   "Number of read requests this region server has answered.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/679f34e8/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 0aa625c..ccb9de2 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -457,4 +457,6 @@ public interface MetricsRegionServerWrapper {
   long getDeleteFamilyBloomHitCount();
 
   long getTrailerHitCount();
+
+  long getTotalRowActionRequestCount();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/679f34e8/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index 94b21bc..e69e17c 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -354,6 +354,8 @@ public class MetricsRegionServerSourceImpl
   .addGauge(Interns.info(AVERAGE_REGION_SIZE, 
AVERAGE_REGION_SIZE_DESC), rsWrap.getAverageRegionSize())
   .addCounter(Interns.info(TOTAL_REQUEST_COUNT, 
TOTAL_REQUEST_COUNT_DESC),
   rsWrap.getTotalRequestCount())
+  .addCounter(Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, 
TOTAL_ROW_ACTION_REQUEST_COUNT_DESC),
+  rsWrap.getTotalRowActionRequestCount())
   .addCounter(Interns.info(READ_REQUEST_COUNT, 
READ_REQUEST_COUNT_DESC),
   rsWrap.getReadRequestsCount())
   .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, 
FILTERED_READ_REQUEST_COUNT_DESC),


hbase git commit: HBASE-18469 Correct RegionServer metric of totalRequestCount

2017-08-11 Thread liyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 ee15c2c29 -> 56a4fedda


HBASE-18469 Correct RegionServer metric of totalRequestCount


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/56a4fedd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/56a4fedd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/56a4fedd

Branch: refs/heads/branch-2
Commit: 56a4fedda20b91e3ea36d4078ee9463e13faec74
Parents: ee15c2c
Author: Yu Li 
Authored: Fri Aug 11 14:13:18 2017 +0800
Committer: Yu Li 
Committed: Fri Aug 11 14:15:03 2017 +0800

--
 .../regionserver/MetricsRegionServerSource.java |  3 ++
 .../MetricsRegionServerWrapper.java |  2 +
 .../MetricsRegionServerSourceImpl.java  |  2 +
 .../MetricsRegionServerWrapperImpl.java |  5 +++
 .../hbase/regionserver/RSRpcServices.java   | 14 ++-
 .../MetricsRegionServerWrapperStub.java |  5 +++
 .../regionserver/TestMetricsRegionServer.java   |  4 ++
 .../regionserver/TestRegionServerMetrics.java   | 40 +++-
 8 files changed, 71 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/56a4fedd/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index df522d3..9656894 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -244,6 +244,9 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String TOTAL_REQUEST_COUNT = "totalRequestCount";
   String TOTAL_REQUEST_COUNT_DESC =
   "Total number of requests this RegionServer has answered.";
+  String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount";
+  String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC =
+  "Total number of region requests this RegionServer has answered, count 
by row-level action";
   String READ_REQUEST_COUNT = "readRequestCount";
   String READ_REQUEST_COUNT_DESC =
   "Number of read requests this region server has answered.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a4fedd/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 0aa625c..ccb9de2 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -457,4 +457,6 @@ public interface MetricsRegionServerWrapper {
   long getDeleteFamilyBloomHitCount();
 
   long getTrailerHitCount();
+
+  long getTotalRowActionRequestCount();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/56a4fedd/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index 94b21bc..e69e17c 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -354,6 +354,8 @@ public class MetricsRegionServerSourceImpl
   .addGauge(Interns.info(AVERAGE_REGION_SIZE, 
AVERAGE_REGION_SIZE_DESC), rsWrap.getAverageRegionSize())
   .addCounter(Interns.info(TOTAL_REQUEST_COUNT, 
TOTAL_REQUEST_COUNT_DESC),
   rsWrap.getTotalRequestCount())
+  .addCounter(Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, 
TOTAL_ROW_ACTION_REQUEST_COUNT_DESC),
+  rsWrap.getTotalRowActionRequestCount())
   .addCounter(Interns.info(READ_REQUEST_COUNT, 
READ_REQUEST_COUNT_DESC),
   rsWrap.getReadRequestsCount())
   .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, 
FILTERED_READ_REQUEST_COUNT_DESC),