[4/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-08 Thread allan163
HBASE-21661 Provide Thrift2 implementation of Table/Admin


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f053003c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f053003c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f053003c

Branch: refs/heads/master
Commit: f053003ce7e8d9c86b2ff762b646d69e5e04cfe2
Parents: 5c902b4
Author: Allan Yang 
Authored: Wed Jan 9 15:38:23 2019 +0800
Committer: Allan Yang 
Committed: Wed Jan 9 15:38:23 2019 +0800

--
 .../apache/hadoop/hbase/thrift/Constants.java   |8 +
 .../hadoop/hbase/thrift/ThriftServer.java   |   11 +
 .../hbase/thrift/generated/AlreadyExists.java   |2 +-
 .../hbase/thrift/generated/BatchMutation.java   |2 +-
 .../thrift/generated/ColumnDescriptor.java  |2 +-
 .../hadoop/hbase/thrift/generated/Hbase.java|2 +-
 .../hadoop/hbase/thrift/generated/IOError.java  |2 +-
 .../hbase/thrift/generated/IllegalArgument.java |2 +-
 .../hadoop/hbase/thrift/generated/Mutation.java |2 +-
 .../hadoop/hbase/thrift/generated/TAppend.java  |2 +-
 .../hadoop/hbase/thrift/generated/TCell.java|2 +-
 .../hadoop/hbase/thrift/generated/TColumn.java  |2 +-
 .../hbase/thrift/generated/TIncrement.java  |2 +-
 .../hbase/thrift/generated/TRegionInfo.java |2 +-
 .../hbase/thrift/generated/TRowResult.java  |2 +-
 .../hadoop/hbase/thrift/generated/TScan.java|2 +-
 .../thrift2/ThriftHBaseServiceHandler.java  |4 +-
 .../hadoop/hbase/thrift2/ThriftUtilities.java   |  437 ++
 .../hbase/thrift2/client/ThriftAdmin.java   | 1405 ++
 .../thrift2/client/ThriftClientBuilder.java |   37 +
 .../hbase/thrift2/client/ThriftConnection.java  |  322 
 .../hbase/thrift2/client/ThriftTable.java   |  492 ++
 .../hadoop/hbase/thrift2/generated/TAppend.java |2 +-
 .../hbase/thrift2/generated/TAuthorization.java |2 +-
 .../thrift2/generated/TCellVisibility.java  |2 +-
 .../hadoop/hbase/thrift2/generated/TColumn.java |2 +-
 .../generated/TColumnFamilyDescriptor.java  |2 +-
 .../thrift2/generated/TColumnIncrement.java |2 +-
 .../hbase/thrift2/generated/TColumnValue.java   |2 +-
 .../hadoop/hbase/thrift2/generated/TDelete.java |2 +-
 .../hadoop/hbase/thrift2/generated/TGet.java|  127 +-
 .../hbase/thrift2/generated/THBaseService.java  |  122 +-
 .../hbase/thrift2/generated/THRegionInfo.java   |2 +-
 .../thrift2/generated/THRegionLocation.java |2 +-
 .../hbase/thrift2/generated/TIOError.java   |2 +-
 .../thrift2/generated/TIllegalArgument.java |2 +-
 .../hbase/thrift2/generated/TIncrement.java |2 +-
 .../thrift2/generated/TNamespaceDescriptor.java |2 +-
 .../hadoop/hbase/thrift2/generated/TPut.java|2 +-
 .../hadoop/hbase/thrift2/generated/TResult.java |2 +-
 .../hbase/thrift2/generated/TRowMutations.java  |2 +-
 .../hadoop/hbase/thrift2/generated/TScan.java   |  127 +-
 .../hbase/thrift2/generated/TServerName.java|2 +-
 .../thrift2/generated/TTableDescriptor.java |2 +-
 .../hbase/thrift2/generated/TTableName.java |   50 +-
 .../hbase/thrift2/generated/TTimeRange.java |2 +-
 .../apache/hadoop/hbase/thrift2/hbase.thrift|   13 +-
 .../hbase/thrift2/TestThriftConnection.java |  841 +++
 48 files changed, 3946 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f053003c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
index 8e3d004..55f2499 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/Constants.java
@@ -144,8 +144,16 @@ public final class Constants {
   public static final String THRIFT_READONLY_ENABLED = "hbase.thrift.readonly";
   public static final boolean THRIFT_READONLY_ENABLED_DEFAULT = false;
 
+  public static final String HBASE_THRIFT_CLIENT_SCANNER_CACHING =
+  "hbase.thrift.client.scanner.caching";
 
+  public static final int HBASE_THRIFT_CLIENT_SCANNER_CACHING_DEFAULT = 20;
 
+  public static final String HBASE_THRIFT_SERVER_NAME = 
"hbase.thrift.server.name";
+  public static final String HBASE_THRIFT_SERVER_PORT = 
"hbase.thrift.server.port";
+
+  public static final String HBASE_THRIFT_CLIENT_BUIDLER_CLASS =
+  "hbase.thrift.client.builder.class";
 
 
 }
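
The new keys round out the Thrift2 client configuration: where the Thrift server listens, how many rows the client-side scanner buffers per request, and which builder class constructs the Thrift client. A minimal sketch of wiring them up, assuming the standard connection-impl override is how ThriftConnection gets selected (the ClusterConnection and ThriftConnection imports in TestThriftConnection below point that way):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.ClusterConnection;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.thrift.Constants;
  import org.apache.hadoop.hbase.thrift2.client.ThriftConnection;

  Configuration conf = HBaseConfiguration.create();
  // Assumption: ThriftConnection is selected via the standard
  // connection-impl override (see TestThriftConnection later in this commit).
  conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
      ThriftConnection.class.getName());
  conf.set(Constants.HBASE_THRIFT_SERVER_NAME, "localhost");
  conf.setInt(Constants.HBASE_THRIFT_SERVER_PORT, 9090);
  // Rows buffered per scanner RPC; the default added above is 20.
  conf.setInt(Constants.HBASE_THRIFT_CLIENT_SCANNER_CACHING, 100);
  try (Connection conn = ConnectionFactory.createConnection(conf)) {
    // Table/Admin handles from conn are Thrift-backed (ThriftTable/ThriftAdmin).
  }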

http://git-wip-us.apache.org/repos/asf/hbase/blob/f053003c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java

[3/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-08 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/f053003c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
new file mode 100644
index 000..d45a6db4
--- /dev/null
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
@@ -0,0 +1,1405 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift2.client;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CacheEvictionStats;
+import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.CompactType;
+import org.apache.hadoop.hbase.client.CompactionState;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.SnapshotType;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.replication.TableCFs;
+import org.apache.hadoop.hbase.client.security.SecurityCapability;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.quotas.QuotaFilter;
+import org.apache.hadoop.hbase.quotas.QuotaRetriever;
+import org.apache.hadoop.hbase.quotas.QuotaSettings;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
+import org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
+import org.apache.hadoop.hbase.thrift2.generated.TNamespaceDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.TTableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransport;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class ThriftAdmin implements Admin {
+
+  private THBaseService.Client client;
+  private TTransport transport;
+  private int operationTimeout;
+  private Configuration conf;
+
+
+  public ThriftAdmin(THBaseService.Client client, TTransport tTransport, 
Configuration conf) {
+this.client = client;
+this.transport = tTransport;
+this.operationTimeout = 
conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+this.conf = conf;
+  }
+
+  @Override
+  public int getOperationTimeout() {
+return operationTimeout;
+  }
+
+  @Override
+  public void abort(String why, Throwable e) {
+
+  }
+
+  @Override
+  public boolean isAborted() {
+return false;
+  }
+
+  @Override
+  public void close() throws IOException {
+transport.close();
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+return conf;
+  }
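
ThriftAdmin adapts the Admin interface onto the Thrift2 THBaseService client. Judging by the NotImplementedException import above, Admin calls with no Thrift2 mapping fail fast at runtime, and lifecycle hooks such as abort() below are deliberate no-ops. A hedged usage sketch, assuming a Connection configured for ThriftConnection (as in the Constants sketch earlier) hands back a ThriftAdmin from getAdmin():

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  // conf configured for ThriftConnection as in the Constants sketch above.
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Admin admin = conn.getAdmin()) { // a ThriftAdmin under that configuration
    for (TableName tn : admin.listTableNames()) {
      System.out.println(tn.getNameAsString());
    }
  }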

[1/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-08 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 5c902b48e -> f053003ce


http://git-wip-us.apache.org/repos/asf/hbase/blob/f053003c/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
--
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
new file mode 100644
index 000..1583619
--- /dev/null
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -0,0 +1,841 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift2;
+
+import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_PORT;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.filter.ColumnValueFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RestTests;
+import org.apache.hadoop.hbase.thrift.Constants;
+import org.apache.hadoop.hbase.thrift2.client.ThriftConnection;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category({ RestTests.class, MediumTests.class})
+
+public class TestThriftConnection {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestThriftConnection.class);
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestThriftConnection.class);
+
+  private static final byte[] FAMILYA = Bytes.toBytes("fa");
+  private static final byte[] FAMILYB = Bytes.toBytes("fb");
+  private static final byte[] FAMILYC = Bytes.toBytes("fc");
+  private static final byte[] FAMILYD = Bytes.toBytes("fd");
+
+  private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
+  private static final byte[] ROW_2 = Bytes.toBytes("testrow2");
+  private static final byte[] ROW_3 = Bytes.toBytes("testrow3");
+  private static final byte[] ROW_4 = Bytes.toBytes("testrow4");
+
+  private static final byte[] QUALIFIER_1 = Bytes.toBytes("1");
+  private static final byte[] QUALIFIER_2 = Bytes.toBytes("2");
+  pri

[2/4] hbase git commit: HBASE-21661 Provide Thrift2 implementation of Table/Admin

2019-01-08 Thread allan163
http://git-wip-us.apache.org/repos/asf/hbase/blob/f053003c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
index b38d936..676275a 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/generated/TGet.java
@@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
  * If you specify a time range and a timestamp the range is ignored.
  * Timestamps on TColumns are ignored.
  */
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2018-12-28")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)", date = 
"2019-01-03")
 public class TGet implements org.apache.thrift.TBase<TGet, TGet._Fields>, 
java.io.Serializable, Cloneable, Comparable<TGet> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TGet");
 
@@ -65,6 +65,7 @@ public class TGet implements org.apache.thrift.TBase, java.i
   private static final org.apache.thrift.protocol.TField 
STORE_LIMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("storeLimit", 
org.apache.thrift.protocol.TType.I32, (short)12);
   private static final org.apache.thrift.protocol.TField 
STORE_OFFSET_FIELD_DESC = new org.apache.thrift.protocol.TField("storeOffset", 
org.apache.thrift.protocol.TType.I32, (short)13);
   private static final org.apache.thrift.protocol.TField 
EXISTENCE_ONLY_FIELD_DESC = new 
org.apache.thrift.protocol.TField("existence_only", 
org.apache.thrift.protocol.TType.BOOL, (short)14);
+  private static final org.apache.thrift.protocol.TField 
FILTER_BYTES_FIELD_DESC = new org.apache.thrift.protocol.TField("filterBytes", 
org.apache.thrift.protocol.TType.STRING, (short)15);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = 
new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -90,6 +91,7 @@ public class TGet implements org.apache.thrift.TBase, java.i
   public int storeLimit; // optional
   public int storeOffset; // optional
   public boolean existence_only; // optional
+  public ByteBuffer filterBytes; // optional
 
   /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -110,7 +112,8 @@ public class TGet implements org.apache.thrift.TBase, java.i
 CACHE_BLOCKS((short)11, "cacheBlocks"),
 STORE_LIMIT((short)12, "storeLimit"),
 STORE_OFFSET((short)13, "storeOffset"),
-EXISTENCE_ONLY((short)14, "existence_only");
+EXISTENCE_ONLY((short)14, "existence_only"),
+FILTER_BYTES((short)15, "filterBytes");
 
    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -153,6 +156,8 @@ public class TGet implements org.apache.thrift.TBase, java.i
   return STORE_OFFSET;
 case 14: // EXISTENCE_ONLY
   return EXISTENCE_ONLY;
+case 15: // FILTER_BYTES
+  return FILTER_BYTES;
 default:
   return null;
   }
@@ -201,7 +206,7 @@ public class TGet implements org.apache.thrift.TBase, java.i
   private static final int __STOREOFFSET_ISSET_ID = 5;
   private static final int __EXISTENCE_ONLY_ISSET_ID = 6;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = 
{_Fields.COLUMNS,_Fields.TIMESTAMP,_Fields.TIME_RANGE,_Fields.MAX_VERSIONS,_Fields.FILTER_STRING,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.CONSISTENCY,_Fields.TARGET_REPLICA_ID,_Fields.CACHE_BLOCKS,_Fields.STORE_LIMIT,_Fields.STORE_OFFSET,_Fields.EXISTENCE_ONLY};
+  private static final _Fields optionals[] = 
{_Fields.COLUMNS,_Fields.TIMESTAMP,_Fields.TIME_RANGE,_Fields.MAX_VERSIONS,_Fields.FILTER_STRING,_Fields.ATTRIBUTES,_Fields.AUTHORIZATIONS,_Fields.CONSISTENCY,_Fields.TARGET_REPLICA_ID,_Fields.CACHE_BLOCKS,_Fields.STORE_LIMIT,_Fields.STORE_OFFSET,_Fields.EXISTENCE_ONLY,_Fields.FILTER_BYTES};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> 
metaDataMap;
   static {
 Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -236,6 +241,8 @@ public class TGet implements org.apache.thrift.TBase, java.i
 new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
 tmpMap.put(_Fields.EXISTENCE_ONLY, new 
org.apache.thrift.meta_data.FieldMetaData("existence_only", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
 new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+tmpMap.put(_Fields.FILTER_BYTES, new 
org.apache.thrift.meta_data.FieldMetaData("filterBytes", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+ne
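
The new optional filterBytes field (id 15) lets a client attach a pre-serialized Filter to a TGet alongside the existing filterString. A hedged sketch of filling it in: setRow and setFilterBytes are the setters Thrift generates for binary fields, while the protobuf encoding via ProtobufUtil.toFilter is an assumption about what the server-side ThriftUtilities expects:

  import org.apache.hadoop.hbase.filter.PrefixFilter;
  import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.thrift2.generated.TGet;
  import org.apache.hadoop.hbase.util.Bytes;

  TGet tGet = new TGet();
  tGet.setRow(Bytes.toBytes("testrow1"));
  PrefixFilter prefix = new PrefixFilter(Bytes.toBytes("testrow"));
  // Assumption: the server decodes the protobuf-encoded filter on its side.
  // ProtobufUtil.toFilter throws IOException, so call this where that is handled.
  tGet.setFilterBytes(ProtobufUtil.toFilter(prefix).toByteArray());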

hbase git commit: HBASE-21645 Perform sanity check and disallow table creation/modification with region replication < 1

2019-01-08 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 f86914c31 -> b9bcbd0a2


HBASE-21645 Perform sanity check and disallow table creation/modification with 
region replication < 1

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b9bcbd0a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b9bcbd0a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b9bcbd0a

Branch: refs/heads/branch-2.0
Commit: b9bcbd0a23c93d1a427ab49650adadcd869fff40
Parents: f86914c
Author: Nihal Jain 
Authored: Tue Jan 8 23:24:08 2019 +0530
Committer: Guanghao Zhang 
Committed: Wed Jan 9 11:54:04 2019 +0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  7 +
 .../hadoop/hbase/client/TestFromClientSide.java | 27 
 2 files changed, 34 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b9bcbd0a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 4605ec5..f92e090 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2074,6 +2074,13 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 }
 
+// check that we have at least 1 region replica
+int regionReplicas = htd.getRegionReplication();
+if (regionReplicas < 1) {
+  String message = "Table region replication should be at least one.";
+  warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+}
+
 for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
   if (hcd.getTimeToLive() <= 0) {
 String message = "TTL for column family " + hcd.getNameAsString() + " 
must be positive.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/b9bcbd0a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 663cb7f..ce1e46e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -6708,4 +6709,30 @@ public class TestFromClientSide {
   assertNull(scanner.next());
 }
   }
+
+  @Test(expected = DoNotRetryIOException.class)
+  public void testCreateTableWithZeroRegionReplicas() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
+.setRegionReplication(0)
+.build();
+
+TEST_UTIL.getAdmin().createTable(desc);
+  }
+
+  @Test(expected = DoNotRetryIOException.class)
+  public void testModifyTableWithZeroRegionReplicas() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
+.build();
+
+TEST_UTIL.getAdmin().createTable(desc);
+TableDescriptor newDesc = TableDescriptorBuilder.newBuilder(desc)
+.setRegionReplication(0)
+.build();
+
+TEST_UTIL.getAdmin().modifyTable(newDesc);
+  }
 }
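
The check rides the existing sanity-check plumbing: warnOrThrowExceptionForFailure throws DoNotRetryIOException while hbase.table.sanity.checks is enabled (the default) and only logs a warning otherwise, which is what the two tests above exercise. In the tests' own idiom, a descriptor now has to keep region replication at one or more:

  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
      .setRegionReplication(1) // anything < 1 is now rejected on create/modify
      .build();
  TEST_UTIL.getAdmin().createTable(desc);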



hbase git commit: HBASE-21645 Perform sanity check and disallow table creation/modification with region replication < 1

2019-01-08 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 5364601de -> 111c827d1


HBASE-21645 Perform sanity check and disallow table creation/modification with 
region replication < 1

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/111c827d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/111c827d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/111c827d

Branch: refs/heads/branch-2.1
Commit: 111c827d1f67f1cd0480c075de209bb478e464f2
Parents: 5364601
Author: Nihal Jain 
Authored: Tue Jan 8 23:24:08 2019 +0530
Committer: Guanghao Zhang 
Committed: Wed Jan 9 11:44:17 2019 +0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  7 +
 .../hadoop/hbase/client/TestFromClientSide.java | 27 
 2 files changed, 34 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/111c827d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 60d0441..3f13a45 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2104,6 +2104,13 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 }
 
+// check that we have at least 1 region replica
+int regionReplicas = htd.getRegionReplication();
+if (regionReplicas < 1) {
+  String message = "Table region replication should be at least one.";
+  warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+}
+
 for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
   if (hcd.getTimeToLive() <= 0) {
 String message = "TTL for column family " + hcd.getNameAsString() + " 
must be positive.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/111c827d/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index c4285b4..5868869 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -6707,4 +6708,30 @@ public class TestFromClientSide {
   assertNull(scanner.next());
 }
   }
+
+  @Test(expected = DoNotRetryIOException.class)
+  public void testCreateTableWithZeroRegionReplicas() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
+.setRegionReplication(0)
+.build();
+
+TEST_UTIL.getAdmin().createTable(desc);
+  }
+
+  @Test(expected = DoNotRetryIOException.class)
+  public void testModifyTableWithZeroRegionReplicas() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
+.build();
+
+TEST_UTIL.getAdmin().createTable(desc);
+TableDescriptor newDesc = TableDescriptorBuilder.newBuilder(desc)
+.setRegionReplication(0)
+.build();
+
+TEST_UTIL.getAdmin().modifyTable(newDesc);
+  }
 }



hbase git commit: HBASE-21645 Perform sanity check and disallow table creation/modification with region replication < 1

2019-01-08 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2 f02ac310d -> 26e02e1c0


HBASE-21645 Perform sanity check and disallow table creation/modification with 
region replication < 1

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/26e02e1c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/26e02e1c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/26e02e1c

Branch: refs/heads/branch-2
Commit: 26e02e1c0b37bf1f13651eeed0e33769899bb88e
Parents: f02ac31
Author: Nihal Jain 
Authored: Tue Jan 8 23:24:08 2019 +0530
Committer: Guanghao Zhang 
Committed: Wed Jan 9 11:05:16 2019 +0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  7 +
 .../hadoop/hbase/client/TestFromClientSide.java | 27 
 2 files changed, 34 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/26e02e1c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index adbae00..dc6fd73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2167,6 +2167,13 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 }
 
+// check that we have at least 1 region replica
+int regionReplicas = htd.getRegionReplication();
+if (regionReplicas < 1) {
+  String message = "Table region replication should be at least one.";
+  warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+}
+
 for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
   if (hcd.getTimeToLive() <= 0) {
 String message = "TTL for column family " + hcd.getNameAsString() + " 
must be positive.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/26e02e1c/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 7e1f71c..a4618f0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -6706,4 +6707,30 @@ public class TestFromClientSide {
   assertNull(scanner.next());
 }
   }
+
+  @Test(expected = DoNotRetryIOException.class)
+  public void testCreateTableWithZeroRegionReplicas() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
+.setRegionReplication(0)
+.build();
+
+TEST_UTIL.getAdmin().createTable(desc);
+  }
+
+  @Test(expected = DoNotRetryIOException.class)
+  public void testModifyTableWithZeroRegionReplicas() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
+.build();
+
+TEST_UTIL.getAdmin().createTable(desc);
+TableDescriptor newDesc = TableDescriptorBuilder.newBuilder(desc)
+.setRegionReplication(0)
+.build();
+
+TEST_UTIL.getAdmin().modifyTable(newDesc);
+  }
 }



hbase git commit: HBASE-21645 Perform sanity check and disallow table creation/modification with region replication < 1

2019-01-08 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master ebe3d1d1d -> 5c902b48e


HBASE-21645 Perform sanity check and disallow table creation/modification with 
region replication < 1

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5c902b48
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5c902b48
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5c902b48

Branch: refs/heads/master
Commit: 5c902b48e57609975ab5f5d09076276586b59581
Parents: ebe3d1d
Author: Nihal Jain 
Authored: Tue Jan 8 23:24:08 2019 +0530
Committer: Guanghao Zhang 
Committed: Wed Jan 9 10:49:19 2019 +0800

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  7 +
 .../hadoop/hbase/client/TestFromClientSide.java | 27 
 2 files changed, 34 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5c902b48/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 8d47db4..9d2a743 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2189,6 +2189,13 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 }
 
+// check that we have at least 1 region replica
+int regionReplicas = htd.getRegionReplication();
+if (regionReplicas < 1) {
+  String message = "Table region replication should be at least one.";
+  warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+}
+
 for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
   if (hcd.getTimeToLive() <= 0) {
 String message = "TTL for column family " + hcd.getNameAsString() + " 
must be positive.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/5c902b48/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index e5ffd73..21e4437 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -6706,4 +6707,30 @@ public class TestFromClientSide {
   assertNull(scanner.next());
 }
   }
+
+  @Test(expected = DoNotRetryIOException.class)
+  public void testCreateTableWithZeroRegionReplicas() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
+.setRegionReplication(0)
+.build();
+
+TEST_UTIL.getAdmin().createTable(desc);
+  }
+
+  @Test(expected = DoNotRetryIOException.class)
+  public void testModifyTableWithZeroRegionReplicas() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
+.build();
+
+TEST_UTIL.getAdmin().createTable(desc);
+TableDescriptor newDesc = TableDescriptorBuilder.newBuilder(desc)
+.setRegionReplication(0)
+.build();
+
+TEST_UTIL.getAdmin().modifyTable(newDesc);
+  }
 }



[hbase] Git Push Summary

2019-01-08 Thread stack
Repository: hbase
Updated Tags:  refs/tags/rel/2.1.2 [created] d851a68b6


hbase git commit: HBASE-21698 Move version in branch-2.1 from 2.1.2 to 2.1.3-SNAPSHOT

2019-01-08 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 1c73b230b -> 5364601de


HBASE-21698 Move version in branch-2.1 from 2.1.2 to 2.1.3-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5364601d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5364601d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5364601d

Branch: refs/heads/branch-2.1
Commit: 5364601deaad0a0df0d9cb981a9fd7ec18e42c9f
Parents: 1c73b23
Author: stack 
Authored: Tue Jan 8 16:01:07 2019 -0800
Committer: stack 
Committed: Tue Jan 8 16:01:07 2019 -0800

--
 hbase-annotations/pom.xml  | 2 +-
 hbase-archetypes/hbase-archetype-builder/pom.xml   | 2 +-
 hbase-archetypes/hbase-client-project/pom.xml  | 2 +-
 hbase-archetypes/hbase-shaded-client-project/pom.xml   | 2 +-
 hbase-archetypes/pom.xml   | 2 +-
 hbase-assembly/pom.xml | 2 +-
 hbase-build-configuration/pom.xml  | 2 +-
 hbase-build-support/hbase-error-prone/pom.xml  | 4 ++--
 hbase-build-support/pom.xml| 2 +-
 hbase-checkstyle/pom.xml   | 4 ++--
 hbase-client/pom.xml   | 2 +-
 hbase-common/pom.xml   | 2 +-
 hbase-endpoint/pom.xml | 2 +-
 hbase-examples/pom.xml | 2 +-
 hbase-external-blockcache/pom.xml  | 2 +-
 hbase-hadoop-compat/pom.xml| 2 +-
 hbase-hadoop2-compat/pom.xml   | 2 +-
 hbase-http/pom.xml | 2 +-
 hbase-it/pom.xml   | 2 +-
 hbase-mapreduce/pom.xml| 2 +-
 hbase-metrics-api/pom.xml  | 2 +-
 hbase-metrics/pom.xml  | 2 +-
 hbase-procedure/pom.xml| 2 +-
 hbase-protocol-shaded/pom.xml  | 2 +-
 hbase-protocol/pom.xml | 2 +-
 hbase-replication/pom.xml  | 2 +-
 hbase-resource-bundle/pom.xml  | 2 +-
 hbase-rest/pom.xml | 2 +-
 hbase-rsgroup/pom.xml  | 2 +-
 hbase-server/pom.xml   | 2 +-
 hbase-shaded/hbase-shaded-check-invariants/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml| 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml   | 2 +-
 hbase-shaded/hbase-shaded-mapreduce/pom.xml| 2 +-
 hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml | 2 +-
 hbase-shaded/pom.xml   | 2 +-
 hbase-shell/pom.xml| 2 +-
 hbase-testing-util/pom.xml | 2 +-
 hbase-thrift/pom.xml   | 2 +-
 hbase-zookeeper/pom.xml| 2 +-
 pom.xml| 2 +-
 41 files changed, 43 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5364601d/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index 2a245d4..54297cf 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>2.1.2</version>
+    <version>2.1.3-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5364601d/hbase-archetypes/hbase-archetype-builder/pom.xml
--
diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml 
b/hbase-archetypes/hbase-archetype-builder/pom.xml
index 71aab9c..e9560be 100644
--- a/hbase-archetypes/hbase-archetype-builder/pom.xml
+++ b/hbase-archetypes/hbase-archetype-builder/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <artifactId>hbase-archetypes</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>2.1.2</version>
+    <version>2.1.3-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5364601d/hbase-archetypes/hbase-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-client-project/pom.xml 
b/hbase-archetypes/hbase-client-project/pom.xml
index 9c1f420..

hbase git commit: HBASE-21697 Add 2.1.2 to the download page

2019-01-08 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master d26acbe1d -> ebe3d1d1d


HBASE-21697 Add 2.1.2 to the download page


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ebe3d1d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ebe3d1d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ebe3d1d1

Branch: refs/heads/master
Commit: ebe3d1d1d9ad7a37961c352bda863e1cde219df0
Parents: d26acbe
Author: stack 
Authored: Tue Jan 8 15:58:14 2019 -0800
Committer: stack 
Committed: Tue Jan 8 15:58:14 2019 -0800

--
 src/site/xdoc/downloads.xml | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ebe3d1d1/src/site/xdoc/downloads.xml
--
diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml
index fb827aa..198ac09 100644
--- a/src/site/xdoc/downloads.xml
+++ b/src/site/xdoc/downloads.xml
@@ -44,24 +44,24 @@ under the License.
 
 
   
-2.1.1
+2.1.2
   
   
 2018/10/31
   
   
-<a href="https://apache.org/dist/hbase/2.1.1/compatibility_report_2.1.0_vs_2.1.1.html">2.1.0 vs 2.1.1</a>
+<a href="https://apache.org/dist/hbase/2.1.2/compatibility_report_2.1.1vs2.1.2.html">2.1.1 vs 2.1.2</a>
   
   
-<a href="https://apache.org/dist/hbase/2.1.1/CHANGES.md">Changes</a>
+<a href="https://apache.org/dist/hbase/2.1.2/CHANGES.md">Changes</a>
   
   
-<a href="https://apache.org/dist/hbase/2.1.1/RELEASENOTES.md">Release Notes</a>
+<a href="https://apache.org/dist/hbase/2.1.2/RELEASENOTES.md">Release Notes</a>
   
   
-<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.1/hbase-2.1.1-src.tar.gz">src</a> (<a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-src.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-src.tar.gz.asc">asc</a>) 
-<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.1/hbase-2.1.1-bin.tar.gz">bin</a> (<a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-bin.tar.gz.asc">asc</a>) 
-<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.1/hbase-2.1.1-client-bin.tar.gz">client-bin</a> (<a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-client-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-client-bin.tar.gz.asc">asc</a>)
+<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.2/hbase-2.1.2-src.tar.gz">src</a> (<a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-src.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-src.tar.gz.asc">asc</a>) 
+<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.2/hbase-2.1.2-bin.tar.gz">bin</a> (<a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-bin.tar.gz.asc">asc</a>) 
+<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.2/hbase-2.1.2-client-bin.tar.gz">client-bin</a> (<a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-client-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-client-bin.tar.gz.asc">asc</a>)
   
 
 



svn commit: r31824 - /dev/hbase/hbase-2.1.2RC1/ /release/hbase/2.1.1/ /release/hbase/2.1.2/

2019-01-08 Thread stack
Author: stack
Date: Tue Jan  8 23:47:56 2019
New Revision: 31824

Log:
Move 2.1.2RC1 to 2.1.2 release and delete 2.1.1

Added:
release/hbase/2.1.2/
  - copied from r31823, dev/hbase/hbase-2.1.2RC1/
Removed:
dev/hbase/hbase-2.1.2RC1/
release/hbase/2.1.1/



[2/4] hbase git commit: HBASE-21687 Update Findbugs Maven Plugin to 3.0.4 to work with Maven 3.6.0+

2019-01-08 Thread apurtell
HBASE-21687 Update Findbugs Maven Plugin to 3.0.4 to work with Maven 3.6.0+

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/42fca236
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/42fca236
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/42fca236

Branch: refs/heads/branch-1.3
Commit: 42fca2364aa3fd2f9b7fdbba92dc2b16616f31d6
Parents: 4b96224
Author: Sean Busbey 
Authored: Mon Jan 7 15:55:57 2019 -0600
Committer: Andrew Purtell 
Committed: Tue Jan 8 10:49:40 2019 -0800

--
 pom.xml | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/42fca236/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 7563a65..27f0864 100644
--- a/pom.xml
+++ b/pom.xml
@@ -571,8 +571,8 @@
 
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>findbugs-maven-plugin</artifactId>
-          <version>3.0.0</version>
-          <configuration>
+          <version>${findbugs.maven.version}</version>
+          <configuration>
   
 
${project.basedir}/../dev-support/findbugs-exclude.xml
 true
@@ -1292,6 +1292,7 @@
 1.6
 2.4.1
 1.3.9-1
+    <findbugs.maven.version>3.0.4</findbugs.maven.version>
 6.18
 2.10.3
 1.5.2.1
@@ -1907,8 +1908,8 @@
 
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>findbugs-maven-plugin</artifactId>
-          <version>3.0.0</version>
-          <configuration>
+          <version>${findbugs.maven.version}</version>
+          <configuration>
   
 
${project.basedir}/../dev-support/findbugs-exclude.xml
 true



[4/4] hbase git commit: HBASE-21687 Update Findbugs Maven Plugin to 3.0.4 to work with Maven 3.6.0+

2019-01-08 Thread apurtell
HBASE-21687 Update Findbugs Maven Plugin to 3.0.4 to work with Maven 3.6.0+

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6b87541d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6b87541d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6b87541d

Branch: refs/heads/branch-1
Commit: 6b87541de826464edc5fbafd917facf4e2b66470
Parents: f9afd92
Author: Sean Busbey 
Authored: Mon Jan 7 15:55:57 2019 -0600
Committer: Andrew Purtell 
Committed: Tue Jan 8 10:50:02 2019 -0800

--
 pom.xml | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6b87541d/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 020e805..10ac12b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -573,8 +573,8 @@
 
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>findbugs-maven-plugin</artifactId>
-          <version>3.0.0</version>
-          <configuration>
+          <version>${findbugs.maven.version}</version>
+          <configuration>
   
 
${project.basedir}/../dev-support/findbugs-exclude.xml
 true
@@ -1300,6 +1300,7 @@
 3.6.1
 2.4.1
 1.3.9-1
+    <findbugs.maven.version>3.0.4</findbugs.maven.version>
 6.18
 2.10.3
 1.5.3
@@ -1989,8 +1990,8 @@
 
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>findbugs-maven-plugin</artifactId>
-          <version>3.0.0</version>
-          <configuration>
+          <version>${findbugs.maven.version}</version>
+          <configuration>
   
 
${project.basedir}/../dev-support/findbugs-exclude.xml
 true



hbase git commit: HBASE-21688: Address WAL filesystem issues

2019-01-08 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/master 7d6ce3569 -> d26acbe1d


HBASE-21688: Address WAL filesystem issues

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d26acbe1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d26acbe1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d26acbe1

Branch: refs/heads/master
Commit: d26acbe1dbc0852fd580c926ffe2d53353c582f1
Parents: 7d6ce35
Author: Vladimir Rodionov 
Authored: Mon Jan 7 19:47:17 2019 -0800
Committer: Josh Elser 
Committed: Tue Jan 8 13:56:11 2019 -0500

--
 .../backup/impl/IncrementalBackupManager.java   | 11 
 .../backup/master/TestBackupLogCleaner.java |  6 ++---
 .../test/IntegrationTestBigLinkedList.java  |  8 +++---
 .../test/IntegrationTestLoadAndVerify.java  |  7 ++---
 .../org/apache/hadoop/hbase/io/WALLink.java |  4 +--
 .../hadoop/hbase/master/MasterWalManager.java   | 10 ---
 .../ReplicationSourceWALReader.java |  2 +-
 .../regionserver/WALEntryStream.java| 14 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  6 ++---
 .../hbase/fs/TestBlockReorderMultiBlocks.java   |  2 +-
 .../hadoop/hbase/master/AbstractTestDLS.java|  4 +--
 .../regionserver/TestWALEntryStream.java| 28 ++--
 12 files changed, 55 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d26acbe1/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index 853f458..93d264a 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -243,10 +243,11 @@ public class IncrementalBackupManager extends 
BackupManager {
   throws IOException {
 LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + 
olderTimestamps
 + "\n newestTimestamps: " + newestTimestamps);
-Path rootdir = FSUtils.getRootDir(conf);
-Path logDir = new Path(rootdir, HConstants.HREGION_LOGDIR_NAME);
-Path oldLogDir = new Path(rootdir, HConstants.HREGION_OLDLOGDIR_NAME);
-FileSystem fs = rootdir.getFileSystem(conf);
+
+Path walRootDir = CommonFSUtils.getWALRootDir(conf);
+Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
+Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
+FileSystem fs = walRootDir.getFileSystem(conf);
 NewestLogFilter pathFilter = new NewestLogFilter();
 
 List<String> resultLogFiles = new ArrayList<>();
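
The pattern in the hunk above is the whole fix: WALs may live under a separate root (hbase.wal.dir) on a different filesystem from hbase.rootdir, so WAL paths and their FileSystem must come from CommonFSUtils.getWALRootDir rather than FSUtils.getRootDir or FileSystem.get. A short sketch of the distinction (getWALRootDir throws IOException; hbase.wal.dir falls back to the root dir when unset):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.util.CommonFSUtils;

  Configuration conf = HBaseConfiguration.create();
  conf.set(HConstants.HBASE_DIR, "hdfs://nn1/hbase");  // data root (hbase.rootdir)
  conf.set("hbase.wal.dir", "hdfs://nn2/hbase-wal");   // optional separate WAL root
  Path walRootDir = CommonFSUtils.getWALRootDir(conf); // hdfs://nn2/hbase-wal
  Path logDir = new Path(walRootDir, HConstants.HREGION_LOGDIR_NAME);
  // Resolve the FileSystem from the WAL root rather than FileSystem.get(conf),
  // otherwise lookups hit the wrong filesystem when the two roots differ.
  FileSystem walFs = walRootDir.getFileSystem(conf);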

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26acbe1/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
--
diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
index b37c7a9..9273487 100644
--- 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
+++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java
@@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -151,8 +151,8 @@ public class TestBackupLogCleaner extends TestBackupBase {
   }
 
   private List<FileStatus> getListOfWALFiles(Configuration c) throws 
IOException {
-Path logRoot = new Path(FSUtils.getRootDir(c), 
HConstants.HREGION_LOGDIR_NAME);
-FileSystem fs = FileSystem.get(c);
+Path logRoot = new Path(CommonFSUtils.getWALRootDir(c), 
HCo

[1/4] hbase git commit: HBASE-21687 Update Findbugs Maven Plugin to 3.0.4 to work with Maven 3.6.0+

2019-01-08 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 f9afd9254 -> 6b87541de
  refs/heads/branch-1.2 18f428abb -> 822676ae1
  refs/heads/branch-1.3 4b9622429 -> 42fca2364
  refs/heads/branch-1.4 928baae11 -> f93fca532


HBASE-21687 Update Findbugs Maven Plugin to 3.0.4 to work with Maven 3.6.0+

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/822676ae
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/822676ae
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/822676ae

Branch: refs/heads/branch-1.2
Commit: 822676ae1fce2b166cfde2d4be92dbed2e37d2a4
Parents: 18f428a
Author: Sean Busbey 
Authored: Mon Jan 7 15:55:57 2019 -0600
Committer: Andrew Purtell 
Committed: Tue Jan 8 10:49:11 2019 -0800

--
 pom.xml | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/822676ae/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 159744c..2c2ce3b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -581,8 +581,8 @@
 
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>findbugs-maven-plugin</artifactId>
-          <version>3.0.0</version>
-          <configuration>
+          <version>${findbugs.maven.version}</version>
+          <configuration>
   
 
${project.basedir}/../dev-support/findbugs-exclude.xml
 true
@@ -1302,6 +1302,7 @@
 1.6
 2.4.1
 1.3.9-1
+    <findbugs.maven.version>3.0.4</findbugs.maven.version>
 6.18
 2.10.3
 1.5.2.1
@@ -1914,8 +1915,8 @@
 
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>findbugs-maven-plugin</artifactId>
-          <version>3.0.0</version>
-          <configuration>
+          <version>${findbugs.maven.version}</version>
+          <configuration>
   
 
${project.basedir}/../dev-support/findbugs-exclude.xml
 true



[3/4] hbase git commit: HBASE-21687 Update Findbugs Maven Plugin to 3.0.4 to work with Maven 3.6.0+

2019-01-08 Thread apurtell
HBASE-21687 Update Findbugs Maven Plugin to 3.0.4 to work with Maven 3.6.0+

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f93fca53
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f93fca53
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f93fca53

Branch: refs/heads/branch-1.4
Commit: f93fca532d18c651c57529014d491649feb6cce2
Parents: 928baae
Author: Sean Busbey 
Authored: Mon Jan 7 15:55:57 2019 -0600
Committer: Andrew Purtell 
Committed: Tue Jan 8 10:49:50 2019 -0800

--
 pom.xml | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f93fca53/pom.xml
--
diff --git a/pom.xml b/pom.xml
index db1c889..4a88765 100644
--- a/pom.xml
+++ b/pom.xml
@@ -573,8 +573,8 @@
 
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>findbugs-maven-plugin</artifactId>
-          <version>3.0.0</version>
-          <configuration>
+          <version>${findbugs.maven.version}</version>
+          <configuration>
   
 
${project.basedir}/../dev-support/findbugs-exclude.xml
 true
@@ -1300,6 +1300,7 @@
 3.6.1
 2.4.1
 1.3.9-1
+    <findbugs.maven.version>3.0.4</findbugs.maven.version>
 6.18
 2.10.3
 1.5.3
@@ -1989,8 +1990,8 @@
 
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>findbugs-maven-plugin</artifactId>
-          <version>3.0.0</version>
-          <configuration>
+          <version>${findbugs.maven.version}</version>
+          <configuration>
   
 
${project.basedir}/../dev-support/findbugs-exclude.xml
 true



hbase git commit: HBASE-21091 Update Hadoop and Java "supported" versions tables

2019-01-08 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/master d957f0fa1 -> 7d6ce3569


HBASE-21091 Update Hadoop and Java "supported" versions tables

Amending-Author: Peter Somogyi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7d6ce356
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7d6ce356
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7d6ce356

Branch: refs/heads/master
Commit: 7d6ce3569df88e1f974e2b5e91a79cb508863663
Parents: d957f0f
Author: Josh Elser 
Authored: Mon Sep 3 20:05:07 2018 -0700
Committer: Peter Somogyi 
Committed: Tue Jan 8 15:49:24 2019 +0100

--
 src/main/asciidoc/_chapters/configuration.adoc | 58 +++--
 1 file changed, 30 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7d6ce356/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc 
b/src/main/asciidoc/_chapters/configuration.adoc
index 113058c..b969c8b 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -111,23 +111,23 @@ HBase recommends downstream users rely on JDK releases 
that are marked as Long T
 |JDK 10
 
 |2.0
-|link:http://search-hadoop.com/m/YGbbsPxZ723m3as[Not Supported]
-|yes
-|link:https://issues.apache.org/jira/browse/HBASE-20264[Not Supported]
-|link:https://issues.apache.org/jira/browse/HBASE-20264[Not Supported]
+|icon:times-circle[role="red"]
+|icon:check-circle[role="green"]
+|icon:exclamation-circle[role="yellow"] Non-LTS, see 
link:https://issues.apache.org/jira/browse/HBASE-20264[HBASE-20264]
+|icon:exclamation-circle[role="yellow"] Non-LTS, see 
link:https://issues.apache.org/jira/browse/HBASE-20264[HBASE-20264]
 
 |1.3
-|yes
-|yes
-|link:https://issues.apache.org/jira/browse/HBASE-20264[Not Supported]
-|link:https://issues.apache.org/jira/browse/HBASE-20264[Not Supported]
+|icon:check-circle[role="green"]
+|icon:check-circle[role="green"]
+|icon:exclamation-circle[role="yellow"] Non-LTS, see 
link:https://issues.apache.org/jira/browse/HBASE-20264[HBASE-20264]
+|icon:exclamation-circle[role="yellow"] Non-LTS, see 
link:https://issues.apache.org/jira/browse/HBASE-20264[HBASE-20264]
 
 
 |1.2
-|yes
-|yes
-|link:https://issues.apache.org/jira/browse/HBASE-20264[Not Supported]
-|link:https://issues.apache.org/jira/browse/HBASE-20264[Not Supported]
+|icon:check-circle[role="green"]
+|icon:check-circle[role="green"]
+|icon:exclamation-circle[role="yellow"] Non-LTS, see 
link:https://issues.apache.org/jira/browse/HBASE-20264[HBASE-20264]
+|icon:exclamation-circle[role="yellow"] Non-LTS, see 
link:https://issues.apache.org/jira/browse/HBASE-20264[HBASE-20264]
 
 |===
 
@@ -213,26 +213,28 @@ Use the following legend to interpret this table:
 
 .Hadoop version support matrix
 
-* "S" = supported
-* "X" = not supported
-* "NT" = Not tested
+* icon:check-circle[role="green"] = Tested to be fully-functional
+* icon:times-circle[role="red"] = Known to not be fully-functional
+* icon:exclamation-circle[role="yellow"] = Not tested, may/may-not function
 
 [cols="1,1,1,1,1,1", options="header"]
 |===
 | | HBase-1.2.x | HBase-1.3.x | HBase-1.5.x | HBase-2.0.x | HBase-2.1.x
-|Hadoop-2.4.x | S | S | X | X | X
-|Hadoop-2.5.x | S | S | X | X | X
-|Hadoop-2.6.0 | X | X | X | X | X
-|Hadoop-2.6.1+ | S | S | X | S | X
-|Hadoop-2.7.0 | X | X | X | X | X
-|Hadoop-2.7.1+ | S | S | S | S | S
-|Hadoop-2.8.[0-1] | X | X | X | X | X
-|Hadoop-2.8.2 | NT | NT | NT | NT | NT
-|Hadoop-2.8.3+ | NT | NT | NT | S | S
-|Hadoop-2.9.0 | X | X | X | X | X
-|Hadoop-2.9.1+ | NT | NT | NT | NT | NT
-|Hadoop-3.0.x | X | X | X | X | X
-|Hadoop-3.1.0 | X | X | X | X | X
+|Hadoop-2.4.x | icon:check-circle[role="green"] | icon:check-circle[role="green"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"]
+|Hadoop-2.5.x | icon:check-circle[role="green"] | icon:check-circle[role="green"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"]
+|Hadoop-2.6.0 | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"]
+|Hadoop-2.6.1+ | icon:check-circle[role="green"] | icon:check-circle[role="green"] | icon:times-circle[role="red"] | icon:check-circle[role="green"] | icon:times-circle[role="red"]
+|Hadoop-2.7.0 | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"] | icon:times-circle[role="red"]
+|Hadoop-2.7.1+ | icon:check-circle[role="green"] | icon:check-circle[role="green"] | icon:check-circle[role="green"] | icon:check-circle[role="green"

[01/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

2019-01-08 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 1da32babf -> 4a0073433


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.FailPrimaryGetCP.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.FailPrimaryGetCP.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.FailPrimaryGetCP.html
index 9df3fac..81b7d33 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.FailPrimaryGetCP.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.FailPrimaryGetCP.html
@@ -32,106 +32,106 @@
 024import java.util.Arrays;
 025import java.util.List;
 026import java.util.Optional;
-027import java.util.concurrent.ForkJoinPool;
-028import java.util.concurrent.TimeUnit;
-029import java.util.concurrent.atomic.AtomicInteger;
-030import java.util.function.Supplier;
-031import org.apache.commons.io.IOUtils;
-032import org.apache.hadoop.hbase.Cell;
-033import org.apache.hadoop.hbase.HBaseClassTestRule;
-034import org.apache.hadoop.hbase.HBaseTestingUtility;
-035import org.apache.hadoop.hbase.HConstants;
-036import org.apache.hadoop.hbase.TableName;
-037import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-038import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-039import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-040import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-041import org.apache.hadoop.hbase.regionserver.HRegion;
-042import org.apache.hadoop.hbase.testclassification.ClientTests;
-043import org.apache.hadoop.hbase.testclassification.MediumTests;
-044import org.apache.hadoop.hbase.util.Bytes;
-045import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-046import org.junit.AfterClass;
-047import org.junit.BeforeClass;
-048import org.junit.ClassRule;
-049import org.junit.Rule;
-050import org.junit.Test;
-051import org.junit.experimental.categories.Category;
-052import org.junit.rules.TestName;
-053import org.junit.runner.RunWith;
-054import org.junit.runners.Parameterized;
-055import org.junit.runners.Parameterized.Parameter;
-056import org.junit.runners.Parameterized.Parameters;
-057
-058@RunWith(Parameterized.class)
-059@Category({ MediumTests.class, ClientTests.class })
-060public class TestAsyncTableRegionReplicasGet {
-061
-062  @ClassRule
-063  public static final HBaseClassTestRule CLASS_RULE =
-064HBaseClassTestRule.forClass(TestAsyncTableRegionReplicasGet.class);
-065
-066  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+027import java.util.concurrent.ConcurrentHashMap;
+028import java.util.concurrent.ConcurrentMap;
+029import java.util.concurrent.ForkJoinPool;
+030import java.util.concurrent.TimeUnit;
+031import java.util.concurrent.atomic.AtomicInteger;
+032import java.util.function.Supplier;
+033import org.apache.commons.io.IOUtils;
+034import org.apache.hadoop.hbase.Cell;
+035import org.apache.hadoop.hbase.HBaseClassTestRule;
+036import org.apache.hadoop.hbase.HBaseTestingUtility;
+037import org.apache.hadoop.hbase.HConstants;
+038import org.apache.hadoop.hbase.TableName;
+039import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+040import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
+041import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+042import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+043import org.apache.hadoop.hbase.regionserver.HRegion;
+044import org.apache.hadoop.hbase.testclassification.ClientTests;
+045import org.apache.hadoop.hbase.testclassification.MediumTests;
+046import org.apache.hadoop.hbase.util.Bytes;
+047import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+048import org.junit.AfterClass;
+049import org.junit.BeforeClass;
+050import org.junit.ClassRule;
+051import org.junit.Rule;
+052import org.junit.Test;
+053import org.junit.experimental.categories.Category;
+054import org.junit.rules.TestName;
+055import org.junit.runner.RunWith;
+056import org.junit.runners.Parameterized;
+057import org.junit.runners.Parameterized.Parameter;
+058import org.junit.runners.Parameterized.Parameters;
+059
+060@RunWith(Parameterized.class)
+061@Category({ MediumTests.class, ClientTests.class })
+062public class TestAsyncTableRegionReplicasGet {
+063
+064  @ClassRule
+065  public static final HBaseClassTestRule CLASS_RULE =
+066HBaseClassTestRule.forClass(TestAsyncTableRegionReplicasGet.class);
 067
-068  private static TableName TABLE_NAME = TableName.valueOf("async");
+068  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 069
-070  private static byte[] FAMILY = Bytes.toBytes("cf");
+070  private sta
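
The hunk is cut off mid-field, but together with the [02/14] index diff below it shows the test replacing its single get counters with a per-replica ConcurrentMap (REPLICA_ID_TO_COUNT, getPrimaryGetCount/getSecondaryGetCount). A minimal sketch of that counting pattern on the HBase 2.x coprocessor API; the class and helper names here are illustrative, not the patch's exact code:

import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

// Sketch: a RegionObserver that counts Get requests per region replica.
public class ReplicaGetCountingCP implements RegionCoprocessor, RegionObserver {

  private static final ConcurrentMap<Integer, AtomicInteger> REPLICA_ID_TO_COUNT =
    new ConcurrentHashMap<>();

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get,
      List<Cell> result) {
    // Replica id 0 is the primary; secondaries are 1, 2, ...
    int replicaId = c.getEnvironment().getRegion().getRegionInfo().getReplicaId();
    REPLICA_ID_TO_COUNT.computeIfAbsent(replicaId, k -> new AtomicInteger())
        .incrementAndGet();
  }

  public static int getReplicaGetCount(int replicaId) {
    AtomicInteger count = REPLICA_ID_TO_COUNT.get(replicaId);
    return count == null ? 0 : count.get();
  }
}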

[05/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

2019-01-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/src-html/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.html
index e57e5a4..1e456db 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.html
@@ -25,11 +25,11 @@
 017 */
 018package org.apache.hadoop.hbase.ipc;
 019
-020import org.apache.hadoop.hbase.HBaseIOException;
+020import org.apache.hadoop.hbase.DoNotRetryIOException;
 021import org.apache.yetus.audience.InterfaceAudience;
 022
 023@InterfaceAudience.Public
-024public class StoppedRpcClientException extends HBaseIOException {
+024public class StoppedRpcClientException extends DoNotRetryIOException {
 025  public StoppedRpcClientException() {
 026super();
 027  }
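
The one-line superclass swap is behavioral: HBase's retrying callers treat DoNotRetryIOException as terminal, so an operation issued against a stopped RpcClient now fails fast instead of exhausting its retry budget. A hand-rolled sketch of that convention (illustrative; this is not HBase's actual RpcRetryingCaller):

import java.io.IOException;
import java.util.concurrent.Callable;
import org.apache.hadoop.hbase.DoNotRetryIOException;

public final class RetryExample {
  // Retry transient IOExceptions, but surface DoNotRetryIOException
  // (e.g. StoppedRpcClientException after this patch) immediately.
  public static <T> T callWithRetries(Callable<T> call, int maxAttempts) throws IOException {
    IOException last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return call.call();
      } catch (DoNotRetryIOException e) {
        throw e; // the RPC client is stopped; retrying cannot succeed
      } catch (IOException e) {
        last = e; // possibly transient; loop and try again
      } catch (Exception e) {
        throw new IOException(e);
      }
    }
    if (last == null) {
      throw new IOException("maxAttempts must be at least 1");
    }
    throw last;
  }
}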

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.html
index bd69f4a..68fdd1a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.html
@@ -153,45 +153,47 @@
 145+ " of region " + region.getRegionInfo().getEncodedName()
 146+ " Now waiting and blocking reads until observing a full flush cycle");
 147}
-148break;
-149  } else {
-150if (response.hasWroteFlushWalMarker()) {
-151  if(response.getWroteFlushWalMarker()) {
-152if (LOG.isDebugEnabled()) {
-153  LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary "
-154  + "region replica " + ServerRegionReplicaUtil
-155.getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName()
-156  + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and "
-157  + "blocking reads until observing a flush marker");
-158}
-159break;
-160  } else {
-161// somehow we were not able to get the primary to write the flush request. It may be
-162// closing or already flushing. Retry flush again after some sleep.
-163if (!counter.shouldRetry()) {
-164  throw new IOException("Cannot cause primary to flush or drop a wal marker after " +
-165  "retries. Failing opening of this region replica "
-166  + region.getRegionInfo().getEncodedName());
-167}
-168  }
-169} else {
-170  // nothing to do. Are we dealing with an old server?
-171  LOG.warn("Was not able to trigger a flush from primary region due to old server version? "
-172  + "Continuing to open the secondary region replica: "
-173  + region.getRegionInfo().getEncodedName());
-174  region.setReadsEnabled(true);
-175  break;
-176}
-177  }
-178  try {
-179counter.sleepUntilNextRetry();
-180  } catch (InterruptedException e) {
-181throw new InterruptedIOException(e.getMessage());
-182  }
-183}
-184  }
-185
-186}
+148region.setReadsEnabled(true);
+149break;
+150  } else {
+151if (response.hasWroteFlushWalMarker()) {
+152  if(response.getWroteFlushWalMarker()) {
+153if (LOG.isDebugEnabled()) {
+154  LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary "
+155  + "region replica " + ServerRegionReplicaUtil
+156.getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName()
+157  + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and "
+158  + "blocking reads until observing a flush marker");
+159}
+160region.setReadsEnabled(true);
+161break;
+162  } else {
+163// somehow we were not able to get the primary to write the flush request. It may be
+164// closing or already flushing. Retry flush again after some sleep.
+165if (!counter.shouldRetry()) {
+166  throw new IOException("Cannot cause primary to flush or drop a wal marker after " +
+167  "retries. Failing opening 
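
The two added region.setReadsEnabled(true) calls mean a secondary replica opens up for reads on both success paths: after a real flush of the primary, and after an empty-memstore flush WAL marker. Condensed, the loop's shape after the patch is roughly the following sketch, where triggerFlushInPrimaryRegion() is hypothetical shorthand for the admin RPC the handler issues, and the protobuf class name is an assumption about the generated AdminProtos:

import java.io.IOException;
import java.io.InterruptedIOException;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
import org.apache.hadoop.hbase.util.RetryCounter;

// Condensed, illustrative sketch of the handler's retry loop after this patch.
abstract class FlushLoopSketch {

  abstract FlushRegionResponse triggerFlushInPrimaryRegion() throws IOException;

  void waitForPrimaryFlush(HRegion region, RetryCounter counter) throws IOException {
    while (true) {
      FlushRegionResponse response = triggerFlushInPrimaryRegion();
      boolean flushed = response.getFlushed();
      boolean wroteMarker =
          response.hasWroteFlushWalMarker() && response.getWroteFlushWalMarker();
      if (flushed || wroteMarker) {
        // The fix: reads are enabled on both success paths before breaking out.
        region.setReadsEnabled(true);
        break;
      }
      if (!counter.shouldRetry()) {
        throw new IOException("Cannot cause primary to flush or drop a wal marker after retries");
      }
      try {
        counter.sleepUntilNextRetry(); // primary busy or closing: back off, retry
      } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
      }
    }
  }
}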

[10/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

2019-01-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.Converter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.Converter.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.Converter.html
index 3d7d280..82373f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.Converter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.Converter.html
@@ -269,462 +269,467 @@
 261
 262  @Override
 263  public CompletableFuture get(Get get) {
-264CompletableFuture primaryFuture =
-265  get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
-266if (get.getConsistency() == Consistency.STRONG) {
-267  return primaryFuture;
-268}
-269// Timeline consistent read, where we will send requests to other region replicas
-270CompletableFuture future = new CompletableFuture<>();
-271connect(primaryFuture, future);
-272long primaryCallTimeoutNs = conn.connConf.getPrimaryCallTimeoutNs();
-273long startNs = System.nanoTime();
-274addListener(conn.getLocator().getRegionLocations(tableName, get.getRow(),
-275  RegionLocateType.CURRENT, false, readRpcTimeoutNs), (locs, error) -> {
-276if (error != null) {
-277  LOG.warn(
-278"Failed to locate all the replicas for table={}, row='{}'," +
-279  " give up timeline consistent read",
-280tableName, Bytes.toStringBinary(get.getRow()), error);
-281  return;
-282}
-283if (locs.size() <= 1) {
-284  LOG.warn(
-285"There are no secondary replicas for region {}," + " give up timeline consistent read",
-286locs.getDefaultRegionLocation().getRegion());
-287  return;
-288}
-289long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
-290if (delayNs <= 0) {
-291  timelineConsistentGet(get, locs, future);
-292} else {
-293  AsyncConnectionImpl.RETRY_TIMER.newTimeout(
-294timeout -> timelineConsistentGet(get, locs, future), delayNs, TimeUnit.NANOSECONDS);
-295}
-296  });
-297return future;
-298  }
-299
-300  @Override
-301  public CompletableFuture put(Put put) {
-302return this. newCaller(put, writeRpcTimeoutNs)
-303  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, stub,
-304put, RequestConverter::buildMutateRequest))
-305  .call();
-306  }
-307
-308  @Override
-309  public CompletableFuture delete(Delete delete) {
-310return this. newCaller(delete, writeRpcTimeoutNs)
-311  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc,
-312stub, delete, RequestConverter::buildMutateRequest))
-313  .call();
-314  }
-315
-316  @Override
-317  public CompletableFuture append(Append append) {
-318checkHasFamilies(append);
-319return this. newCaller(append, rpcTimeoutNs)
-320  .action((controller, loc, stub) -> this. noncedMutate(controller, loc, stub,
-321append, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-322  .call();
-323  }
-324
-325  @Override
-326  public CompletableFuture increment(Increment increment) {
-327checkHasFamilies(increment);
-328return this. newCaller(increment, rpcTimeoutNs)
-329  .action((controller, loc, stub) -> this. noncedMutate(controller, loc,
-330stub, increment, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-331  .call();
-332  }
-333
-334  private final class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder {
-335
-336private final byte[] row;
-337
-338private final byte[] family;
-339
-340private byte[] qualifier;
-341
-342private TimeRange timeRange;
-343
-344private CompareOperator op;
-345
-346private byte[] value;
-347
-348public CheckAndMutateBuilderImpl(byte[] row, byte[] family) {
-349  this.row = Preconditions.checkNotNull(row, "row is null");
-350  this.family = Preconditions.checkNotNull(family, "family is null");
-351}
+264if (get.getConsistency() == Consistency.STRONG) {
+265  return get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
+266}
+267// user specifies a replica id explicitly, just send request to the specific replica
+268if (get.getReplicaId() >= 0) {
+269  return get(get, get.getReplicaId(), readRpcTimeoutNs);
+270}
+271
+272// Timeline consistent read, where we may send requests to other region replicas
+273CompletableFuture primaryFuture =
+274  get(get, RegionR
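
The rewritten get(Get) now short-circuits twice before any fan-out: a STRONG read always goes to the primary, and an explicitly set replica id pins the read to that one replica; only the remaining TIMELINE case schedules secondary reads after the primary-call timeout. A usage sketch against the public async client (table handle, rows and replica id are illustrative):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: exercise the three branches of the rewritten get(Get).
// 'table' would come from AsyncConnection.getTable(...).
class ReadPathExample {
  void readPaths(AsyncTable<?> table) {
    // 1. STRONG (the default): always served by the primary replica.
    Get strong = new Get(Bytes.toBytes("row1"));
    CompletableFuture<Result> primary = table.get(strong);

    // 2. Explicit replica id: sent only to that replica, no fan-out.
    Get pinned = new Get(Bytes.toBytes("row1"));
    pinned.setConsistency(Consistency.TIMELINE);
    pinned.setReplicaId(1);
    CompletableFuture<Result> fromSecondary = table.get(pinned);

    // 3. TIMELINE without a replica id: primary first, then fan-out to
    //    secondaries once the primary-call timeout elapses.
    Get timeline = new Get(Bytes.toBytes("row1"));
    timeline.setConsistency(Consistency.TIMELINE);
    CompletableFuture<Result> fastest = table.get(timeline);
  }
}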

[03/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

2019-01-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
index c4847bb..75b91e6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
@@ -38,333 +38,334 @@
 030import java.util.concurrent.TimeUnit;
 031import java.util.concurrent.atomic.AtomicBoolean;
 032import org.apache.hadoop.conf.Configuration;
-033import org.apache.hadoop.hbase.util.Threads;
-034import org.apache.yetus.audience.InterfaceAudience;
-035import org.apache.zookeeper.KeeperException;
-036import org.apache.zookeeper.KeeperException.Code;
-037import org.apache.zookeeper.ZooKeeper;
-038import org.apache.zookeeper.data.Stat;
-039import org.slf4j.Logger;
-040import org.slf4j.LoggerFactory;
-041
-042import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-043
-044/**
-045 * A very simple read only zookeeper implementation without watcher support.
-046 */
-047@InterfaceAudience.Private
-048public final class ReadOnlyZKClient implements Closeable {
-049
-050  private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyZKClient.class);
-051
-052  public static final String RECOVERY_RETRY = "zookeeper.recovery.retry";
-053
-054  private static final int DEFAULT_RECOVERY_RETRY = 30;
-055
-056  public static final String RECOVERY_RETRY_INTERVAL_MILLIS =
-057  "zookeeper.recovery.retry.intervalmill";
-058
-059  private static final int DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS = 1000;
-060
-061  public static final String KEEPALIVE_MILLIS = "zookeeper.keep-alive.time";
-062
-063  private static final int DEFAULT_KEEPALIVE_MILLIS = 6;
-064
-065  private static final EnumSet FAIL_FAST_CODES = EnumSet.of(Code.NOAUTH, Code.AUTHFAILED);
-066
-067  private final String connectString;
-068
-069  private final int sessionTimeoutMs;
-070
-071  private final int maxRetries;
-072
-073  private final int retryIntervalMs;
-074
-075  private final int keepAliveTimeMs;
-076
-077  private static abstract class Task implements Delayed {
-078
-079protected long time = System.nanoTime();
-080
-081public boolean needZk() {
-082  return false;
-083}
-084
-085public void exec(ZooKeeper zk) {
-086}
-087
-088public void connectFailed(IOException e) {
-089}
-090
-091public void closed(IOException e) {
-092}
-093
-094@Override
-095public int compareTo(Delayed o) {
-096  Task that = (Task) o;
-097  int c = Long.compare(time, that.time);
-098  if (c != 0) {
-099return c;
-100  }
-101  return Integer.compare(System.identityHashCode(this), System.identityHashCode(that));
-102}
-103
-104@Override
-105public long getDelay(TimeUnit unit) {
-106  return unit.convert(time - System.nanoTime(), TimeUnit.NANOSECONDS);
-107}
-108  }
-109
-110  private static final Task CLOSE = new Task() {
-111  };
-112
-113  private final DelayQueue tasks = new DelayQueue<>();
-114
-115  private final AtomicBoolean closed = new AtomicBoolean(false);
-116
-117  @VisibleForTesting
-118  ZooKeeper zookeeper;
-119
-120  private int pendingRequests = 0;
-121
-122  private String getId() {
-123return String.format("0x%08x", System.identityHashCode(this));
-124  }
-125
-126  public ReadOnlyZKClient(Configuration conf) {
-127// We might use a different ZK for client access
-128String clientZkQuorumServers = ZKConfig.getClientZKQuorumServersString(conf);
-129if (clientZkQuorumServers != null) {
-130  this.connectString = clientZkQuorumServers;
-131} else {
-132  this.connectString = ZKConfig.getZKQuorumServersString(conf);
-133}
-134this.sessionTimeoutMs = conf.getInt(ZK_SESSION_TIMEOUT, DEFAULT_ZK_SESSION_TIMEOUT);
-135this.maxRetries = conf.getInt(RECOVERY_RETRY, DEFAULT_RECOVERY_RETRY);
-136this.retryIntervalMs =
-137conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS);
-138this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS);
-139LOG.debug(
-140  "Connect {} to {} with session timeout={}ms, retries {}, " +
-141"retry interval {}ms, keepAlive={}ms",
-142  getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, keepAliveTimeMs);
-143Threads.setDaemonThreadRunning(new Thread(this::run),
-144  "ReadOnlyZKClient-" + connectString + "@" + getId());
-145  }
-146
-147  private abstract class ZKTask extends Task {
-148
-149protected final String path;
-150
-151private final CompletableFuture future;
-152
-153private final S
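
The visible constructor change is that ReadOnlyZKClient now prefers a separate client-facing ZooKeeper quorum (ZKConfig.getClientZKQuorumServersString) when one is configured, falling back to the server quorum otherwise. A usage sketch of the client and its retry knobs; note the class is @InterfaceAudience.Private, and the /hbase/hbaseid path assumes the default parent znode:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient;

// Sketch only: ReadOnlyZKClient is internal API, shown here to illustrate
// the config keys from the diff above.
public class ReadOnlyZkExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Tune the retry knobs surfaced in the quoted source.
    conf.setInt(ReadOnlyZKClient.RECOVERY_RETRY, 10);
    conf.setInt(ReadOnlyZKClient.RECOVERY_RETRY_INTERVAL_MILLIS, 500);
    try (ReadOnlyZKClient zk = new ReadOnlyZKClient(conf)) {
      // get() returns a CompletableFuture<byte[]>; block for the demo.
      byte[] data = zk.get("/hbase/hbaseid").get();
      System.out.println("clusterid bytes: " + (data == null ? 0 : data.length));
    }
  }
}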

[14/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/4a007343
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/4a007343
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/4a007343

Branch: refs/heads/asf-site
Commit: 4a007343328612e148645284834b840f2b7531cd
Parents: 1da32ba
Author: jenkins 
Authored: Tue Jan 8 14:57:09 2019 +
Committer: jenkins 
Committed: Tue Jan 8 14:57:09 2019 +

--
 acid-semantics.html |   4 +-
 apache_hbase_reference_guide.pdf|   4 +-
 .../hadoop/hbase/DoNotRetryIOException.html |   2 +-
 .../apache/hadoop/hbase/HBaseIOException.html   |   2 +-
 .../hbase/class-use/DoNotRetryIOException.html  |  10 +-
 .../hbase/ipc/StoppedRpcClientException.html|   7 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |   2 +-
 apidocs/overview-tree.html  |   2 +-
 apidocs/serialized-form.html|   2 +-
 .../hbase/ipc/StoppedRpcClientException.html|   4 +-
 book.html   |   2 +-
 bulk-loads.html |   4 +-
 checkstyle-aggregate.html   |  12 +-
 coc.html|   4 +-
 dependencies.html   |   4 +-
 dependency-convergence.html |   4 +-
 dependency-info.html|   4 +-
 dependency-management.html  |   4 +-
 devapidocs/constant-values.html |   4 +-
 .../hadoop/hbase/DoNotRetryIOException.html |   2 +-
 .../apache/hadoop/hbase/HBaseIOException.html   |   2 +-
 .../hbase/class-use/DoNotRetryIOException.html  |  12 +-
 ...syncTableImpl.CheckAndMutateBuilderImpl.html |  32 +-
 ...TableImpl.CoprocessorServiceBuilderImpl.html |  24 +-
 .../hadoop/hbase/client/RawAsyncTableImpl.html  |  56 +-
 .../hbase/ipc/StoppedRpcClientException.html|   7 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |   2 +-
 .../hbase/zookeeper/ReadOnlyZKClient.Task.html  |  18 +-
 .../zookeeper/ReadOnlyZKClient.ZKTask.html  |  26 +-
 .../hbase/zookeeper/ReadOnlyZKClient.html   |  58 +-
 devapidocs/overview-tree.html   |   2 +-
 devapidocs/serialized-form.html |   2 +-
 .../org/apache/hadoop/hbase/Version.html|   4 +-
 ...syncTableImpl.CheckAndMutateBuilderImpl.html | 825 ++-
 .../client/RawAsyncTableImpl.Converter.html | 825 ++-
 ...TableImpl.CoprocessorServiceBuilderImpl.html | 825 ++-
 .../RawAsyncTableImpl.NoncedConverter.html  | 825 ++-
 .../hbase/client/RawAsyncTableImpl.RpcCall.html | 825 ++-
 .../hadoop/hbase/client/RawAsyncTableImpl.html  | 825 ++-
 .../hbase/ipc/StoppedRpcClientException.html|   4 +-
 .../handler/RegionReplicaFlushHandler.html  |  80 +-
 .../hbase/zookeeper/ReadOnlyZKClient.Task.html  | 655 +++
 .../zookeeper/ReadOnlyZKClient.ZKTask.html  | 655 +++
 .../hbase/zookeeper/ReadOnlyZKClient.html   | 655 +++
 downloads.html  |   4 +-
 export_control.html |   4 +-
 index.html  |   4 +-
 integration.html|   4 +-
 issue-tracking.html |   4 +-
 license.html|   4 +-
 mail-lists.html |   4 +-
 metrics.html|   4 +-
 old_news.html   |   4 +-
 plugin-management.html  |   4 +-
 plugins.html|   4 +-
 poweredbyhbase.html |   4 +-
 project-info.html   |   4 +-
 project-reports.html|   4 +-
 project-summary.html|   4 +-
 pseudo-distributed.html |   4 +-
 replication.html|   4 +-
 resources.html  |   4 +-
 source-repository.html  |   4 +-
 sponsors.html   |   4 +-
 supportingprojects.html |   4 +-
 team-list.html  |   4 +-
 testdevapidocs/index-all.html   |  14 +-
 .../hadoop/hbase/backup/package-tree.html   |   2 +-
 ...TableRegionReplicasGet.FailPrimaryGetCP.html |   8 +-
 .../client/TestAsyncTableRegionReplicasGet.html | 138 ++--
 .../hadoop/hbase/io/hfile/package-tree.html |   2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |  12 +-
 .../hadoop/hbase/procedure/package-tree.html|   8 

[08/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.NoncedConverter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.NoncedConverter.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.NoncedConverter.html
index 3d7d280..82373f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.NoncedConverter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.NoncedConverter.html
@@ -269,462 +269,467 @@
 261
 262  @Override
 263  public CompletableFuture get(Get get) {
-264CompletableFuture primaryFuture =
-265  get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
-266if (get.getConsistency() == Consistency.STRONG) {
-267  return primaryFuture;
-268}
-269// Timeline consistent read, where we will send requests to other region replicas
-270CompletableFuture future = new CompletableFuture<>();
-271connect(primaryFuture, future);
-272long primaryCallTimeoutNs = conn.connConf.getPrimaryCallTimeoutNs();
-273long startNs = System.nanoTime();
-274addListener(conn.getLocator().getRegionLocations(tableName, get.getRow(),
-275  RegionLocateType.CURRENT, false, readRpcTimeoutNs), (locs, error) -> {
-276if (error != null) {
-277  LOG.warn(
-278"Failed to locate all the replicas for table={}, row='{}'," +
-279  " give up timeline consistent read",
-280tableName, Bytes.toStringBinary(get.getRow()), error);
-281  return;
-282}
-283if (locs.size() <= 1) {
-284  LOG.warn(
-285"There are no secondary replicas for region {}," + " give up timeline consistent read",
-286locs.getDefaultRegionLocation().getRegion());
-287  return;
-288}
-289long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
-290if (delayNs <= 0) {
-291  timelineConsistentGet(get, locs, future);
-292} else {
-293  AsyncConnectionImpl.RETRY_TIMER.newTimeout(
-294timeout -> timelineConsistentGet(get, locs, future), delayNs, TimeUnit.NANOSECONDS);
-295}
-296  });
-297return future;
-298  }
-299
-300  @Override
-301  public CompletableFuture put(Put put) {
-302return this. newCaller(put, writeRpcTimeoutNs)
-303  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, stub,
-304put, RequestConverter::buildMutateRequest))
-305  .call();
-306  }
-307
-308  @Override
-309  public CompletableFuture delete(Delete delete) {
-310return this. newCaller(delete, writeRpcTimeoutNs)
-311  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc,
-312stub, delete, RequestConverter::buildMutateRequest))
-313  .call();
-314  }
-315
-316  @Override
-317  public CompletableFuture append(Append append) {
-318checkHasFamilies(append);
-319return this. newCaller(append, rpcTimeoutNs)
-320  .action((controller, loc, stub) -> this. noncedMutate(controller, loc, stub,
-321append, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-322  .call();
-323  }
-324
-325  @Override
-326  public CompletableFuture increment(Increment increment) {
-327checkHasFamilies(increment);
-328return this. newCaller(increment, rpcTimeoutNs)
-329  .action((controller, loc, stub) -> this. noncedMutate(controller, loc,
-330stub, increment, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-331  .call();
-332  }
-333
-334  private final class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder {
-335
-336private final byte[] row;
-337
-338private final byte[] family;
-339
-340private byte[] qualifier;
-341
-342private TimeRange timeRange;
-343
-344private CompareOperator op;
-345
-346private byte[] value;
-347
-348public CheckAndMutateBuilderImpl(byte[] row, byte[] family) {
-349  this.row = Preconditions.checkNotNull(row, "row is null");
-350  this.family = Preconditions.checkNotNull(family, "family is null");
-351}
+264if (get.getConsistency() == Consistency.STRONG) {
+265  return get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
+266}
+267// user specifies a replica id explicitly, just send request to the specific replica
+268if (get.getReplicaId() >= 0) {
+269  return get(get, get.getReplicaId(), readRpcTimeoutNs);
+270}
+271
+272// Timeline consistent read, where we may send requests to other region replicas
+273CompletableFuture primaryFuture

[09/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
index 3d7d280..82373f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
@@ -269,462 +269,467 @@
 261
 262  @Override
 263  public CompletableFuture get(Get get) {
-264CompletableFuture primaryFuture =
-265  get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
-266if (get.getConsistency() == Consistency.STRONG) {
-267  return primaryFuture;
-268}
-269// Timeline consistent read, where we will send requests to other region replicas
-270CompletableFuture future = new CompletableFuture<>();
-271connect(primaryFuture, future);
-272long primaryCallTimeoutNs = conn.connConf.getPrimaryCallTimeoutNs();
-273long startNs = System.nanoTime();
-274addListener(conn.getLocator().getRegionLocations(tableName, get.getRow(),
-275  RegionLocateType.CURRENT, false, readRpcTimeoutNs), (locs, error) -> {
-276if (error != null) {
-277  LOG.warn(
-278"Failed to locate all the replicas for table={}, row='{}'," +
-279  " give up timeline consistent read",
-280tableName, Bytes.toStringBinary(get.getRow()), error);
-281  return;
-282}
-283if (locs.size() <= 1) {
-284  LOG.warn(
-285"There are no secondary replicas for region {}," + " give up timeline consistent read",
-286locs.getDefaultRegionLocation().getRegion());
-287  return;
-288}
-289long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
-290if (delayNs <= 0) {
-291  timelineConsistentGet(get, locs, future);
-292} else {
-293  AsyncConnectionImpl.RETRY_TIMER.newTimeout(
-294timeout -> timelineConsistentGet(get, locs, future), delayNs, TimeUnit.NANOSECONDS);
-295}
-296  });
-297return future;
-298  }
-299
-300  @Override
-301  public CompletableFuture put(Put put) {
-302return this. newCaller(put, writeRpcTimeoutNs)
-303  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, stub,
-304put, RequestConverter::buildMutateRequest))
-305  .call();
-306  }
-307
-308  @Override
-309  public CompletableFuture delete(Delete delete) {
-310return this. newCaller(delete, writeRpcTimeoutNs)
-311  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc,
-312stub, delete, RequestConverter::buildMutateRequest))
-313  .call();
-314  }
-315
-316  @Override
-317  public CompletableFuture append(Append append) {
-318checkHasFamilies(append);
-319return this. newCaller(append, rpcTimeoutNs)
-320  .action((controller, loc, stub) -> this. noncedMutate(controller, loc, stub,
-321append, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-322  .call();
-323  }
-324
-325  @Override
-326  public CompletableFuture increment(Increment increment) {
-327checkHasFamilies(increment);
-328return this. newCaller(increment, rpcTimeoutNs)
-329  .action((controller, loc, stub) -> this. noncedMutate(controller, loc,
-330stub, increment, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-331  .call();
-332  }
-333
-334  private final class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder {
-335
-336private final byte[] row;
-337
-338private final byte[] family;
-339
-340private byte[] qualifier;
-341
-342private TimeRange timeRange;
-343
-344private CompareOperator op;
-345
-346private byte[] value;
-347
-348public CheckAndMutateBuilderImpl(byte[] row, byte[] family) {
-349  this.row = Preconditions.checkNotNull(row, "row is null");
-350  this.family = Preconditions.checkNotNull(family, "family is null");
-351}
+264if (get.getConsistency() == Consistency.STRONG) {
+265  return get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
+266}
+267// user specifies a replica id explicitly, just send request to the specific replica
+268if (get.getReplicaId() >= 0) {
+269  return get(get, get.getReplicaId(), readRpcTimeoutNs);
+270}
+271
+272// Timeline consistent read, where we may send requests to 

hbase-site git commit: INFRA-10751 Empty commit

Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 4a0073433 -> 44781de0d


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/44781de0
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/44781de0
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/44781de0

Branch: refs/heads/asf-site
Commit: 44781de0de8da55d41065903df404ed7de84f3b1
Parents: 4a00734
Author: jenkins 
Authored: Tue Jan 8 14:57:26 2019 +
Committer: jenkins 
Committed: Tue Jan 8 14:57:26 2019 +

--

--




[13/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
index 5604e4e..b4bb7d1 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CoprocessorServiceBuilderImpl.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class RawAsyncTableImpl.CoprocessorServiceBuilderImpl
+private final class RawAsyncTableImpl.CoprocessorServiceBuilderImpl
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true"; title="class or interface in java.lang">Object
 implements AsyncTable.CoprocessorServiceBuilder
 
@@ -251,7 +251,7 @@ implements 
 
 stubMaker
-private final https://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true"; title="class or interface in java.util.function">Function stubMaker
+private final https://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true"; title="class or interface in java.util.function">Function stubMaker
 
 
 
@@ -260,7 +260,7 @@ implements 
 
 callable
-private final ServiceCaller callable
+private final ServiceCaller callable
 
 
 
@@ -269,7 +269,7 @@ implements 
 
 callback
-private final AsyncTable.CoprocessorCallback callback
+private final AsyncTable.CoprocessorCallback callback
 
 
 
@@ -278,7 +278,7 @@ implements 
 
 startKey
-private byte[] startKey
+private byte[] startKey
 
 
 
@@ -287,7 +287,7 @@ implements 
 
 startKeyInclusive
-private boolean startKeyInclusive
+private boolean startKeyInclusive
 
 
 
@@ -296,7 +296,7 @@ implements 
 
 endKey
-private byte[] endKey
+private byte[] endKey
 
 
 
@@ -305,7 +305,7 @@ implements 
 
 endKeyInclusive
-private boolean endKeyInclusive
+private boolean endKeyInclusive
 
 
 
@@ -322,7 +322,7 @@ implements 
 
 CoprocessorServiceBuilderImpl
-public CoprocessorServiceBuilderImpl(https://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true"; title="class or interface in java.util.function">Function stubMaker,
+public CoprocessorServiceBuilderImpl(https://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true"; title="class or interface in java.util.function">Function stubMaker,
  ServiceCaller callable,
  AsyncTable.CoprocessorCallback callback)
 
@@ -341,7 +341,7 @@ implements 
 
 fromRow
-public RawAsyncTableImpl.CoprocessorServiceBuilderImpl fromRow(byte[] startKey,
+public RawAsyncTableImpl.CoprocessorServiceBuilderImpl fromRow(byte[] startKey,
 boolean inclusive)
 
 Specified by:
@@ -358,7 +358,7 @@ implements 
 
 toRow
-public RawAsyncTableImpl.CoprocessorServiceBuilderImpl toRow(byte[] endKey,
+public RawAsyncTableImpl.CoprocessorServiceBuilderImpl toRow(byte[] endKey,
 boolean inclusive)
 
 Specified by:
@@ -375,7 +375,7 @@ implements 
 
 execute
-public void execute()
+public void execute()
 Description copied from interface: AsyncTable.CoprocessorServiceBuilder
 Execute the coprocessorService request. You can get the response through the AsyncTable.CoprocessorCallback.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
index 1912a82..3b67800 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
@@ -901,7 +901,7 @@ implements 
 
 put
-public https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true"; title="class or interface in java.util.concurrent">CompletableFutureVoid> put(Put put)
+public https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true"; title="class or interface in java.util.concurrent">CompletableFuture

[11/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
index 3d7d280..82373f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
@@ -269,462 +269,467 @@
 261
 262  @Override
 263  public CompletableFuture get(Get get) {
-264CompletableFuture primaryFuture =
-265  get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
-266if (get.getConsistency() == Consistency.STRONG) {
-267  return primaryFuture;
-268}
-269// Timeline consistent read, where we will send requests to other region replicas
-270CompletableFuture future = new CompletableFuture<>();
-271connect(primaryFuture, future);
-272long primaryCallTimeoutNs = conn.connConf.getPrimaryCallTimeoutNs();
-273long startNs = System.nanoTime();
-274addListener(conn.getLocator().getRegionLocations(tableName, get.getRow(),
-275  RegionLocateType.CURRENT, false, readRpcTimeoutNs), (locs, error) -> {
-276if (error != null) {
-277  LOG.warn(
-278"Failed to locate all the replicas for table={}, row='{}'," +
-279  " give up timeline consistent read",
-280tableName, Bytes.toStringBinary(get.getRow()), error);
-281  return;
-282}
-283if (locs.size() <= 1) {
-284  LOG.warn(
-285"There are no secondary replicas for region {}," + " give up timeline consistent read",
-286locs.getDefaultRegionLocation().getRegion());
-287  return;
-288}
-289long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
-290if (delayNs <= 0) {
-291  timelineConsistentGet(get, locs, future);
-292} else {
-293  AsyncConnectionImpl.RETRY_TIMER.newTimeout(
-294timeout -> timelineConsistentGet(get, locs, future), delayNs, TimeUnit.NANOSECONDS);
-295}
-296  });
-297return future;
-298  }
-299
-300  @Override
-301  public CompletableFuture put(Put put) {
-302return this. newCaller(put, writeRpcTimeoutNs)
-303  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, stub,
-304put, RequestConverter::buildMutateRequest))
-305  .call();
-306  }
-307
-308  @Override
-309  public CompletableFuture delete(Delete delete) {
-310return this. newCaller(delete, writeRpcTimeoutNs)
-311  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc,
-312stub, delete, RequestConverter::buildMutateRequest))
-313  .call();
-314  }
-315
-316  @Override
-317  public CompletableFuture append(Append append) {
-318checkHasFamilies(append);
-319return this. newCaller(append, rpcTimeoutNs)
-320  .action((controller, loc, stub) -> this. noncedMutate(controller, loc, stub,
-321append, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-322  .call();
-323  }
-324
-325  @Override
-326  public CompletableFuture increment(Increment increment) {
-327checkHasFamilies(increment);
-328return this. newCaller(increment, rpcTimeoutNs)
-329  .action((controller, loc, stub) -> this. noncedMutate(controller, loc,
-330stub, increment, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-331  .call();
-332  }
-333
-334  private final class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder {
-335
-336private final byte[] row;
-337
-338private final byte[] family;
-339
-340private byte[] qualifier;
-341
-342private TimeRange timeRange;
-343
-344private CompareOperator op;
-345
-346private byte[] value;
-347
-348public CheckAndMutateBuilderImpl(byte[] row, byte[] family) {
-349  this.row = Preconditions.checkNotNull(row, "row is null");
-350  this.family = Preconditions.checkNotNull(family, "family is null");
-351}
+264if (get.getConsistency() == Consistency.STRONG) {
+265  return get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
+266}
+267// user specifies a replica id explicitly, just send request to the specific replica
+268if (get.getReplicaId() >= 0) {
+269  return get(get, get.getReplicaId(), readRpcTimeoutNs);
+270}
+271
+272// Timeline consistent read, where we may send requests to other region replica

[04/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.ZKTask.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.ZKTask.html b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.ZKTask.html
index c4847bb..75b91e6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.ZKTask.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.ZKTask.html
@@ -38,333 +38,334 @@
 030import java.util.concurrent.TimeUnit;
 031import java.util.concurrent.atomic.AtomicBoolean;
 032import org.apache.hadoop.conf.Configuration;
-033import org.apache.hadoop.hbase.util.Threads;
-034import org.apache.yetus.audience.InterfaceAudience;
-035import org.apache.zookeeper.KeeperException;
-036import org.apache.zookeeper.KeeperException.Code;
-037import org.apache.zookeeper.ZooKeeper;
-038import org.apache.zookeeper.data.Stat;
-039import org.slf4j.Logger;
-040import org.slf4j.LoggerFactory;
-041
-042import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-043
-044/**
-045 * A very simple read only zookeeper implementation without watcher support.
-046 */
-047@InterfaceAudience.Private
-048public final class ReadOnlyZKClient implements Closeable {
-049
-050  private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyZKClient.class);
-051
-052  public static final String RECOVERY_RETRY = "zookeeper.recovery.retry";
-053
-054  private static final int DEFAULT_RECOVERY_RETRY = 30;
-055
-056  public static final String RECOVERY_RETRY_INTERVAL_MILLIS =
-057  "zookeeper.recovery.retry.intervalmill";
-058
-059  private static final int DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS = 1000;
-060
-061  public static final String KEEPALIVE_MILLIS = "zookeeper.keep-alive.time";
-062
-063  private static final int DEFAULT_KEEPALIVE_MILLIS = 6;
-064
-065  private static final EnumSet FAIL_FAST_CODES = EnumSet.of(Code.NOAUTH, Code.AUTHFAILED);
-066
-067  private final String connectString;
-068
-069  private final int sessionTimeoutMs;
-070
-071  private final int maxRetries;
-072
-073  private final int retryIntervalMs;
-074
-075  private final int keepAliveTimeMs;
-076
-077  private static abstract class Task implements Delayed {
-078
-079protected long time = System.nanoTime();
-080
-081public boolean needZk() {
-082  return false;
-083}
-084
-085public void exec(ZooKeeper zk) {
-086}
-087
-088public void connectFailed(IOException e) {
-089}
-090
-091public void closed(IOException e) {
-092}
-093
-094@Override
-095public int compareTo(Delayed o) {
-096  Task that = (Task) o;
-097  int c = Long.compare(time, that.time);
-098  if (c != 0) {
-099return c;
-100  }
-101  return Integer.compare(System.identityHashCode(this), System.identityHashCode(that));
-102}
-103
-104@Override
-105public long getDelay(TimeUnit unit) {
-106  return unit.convert(time - System.nanoTime(), TimeUnit.NANOSECONDS);
-107}
-108  }
-109
-110  private static final Task CLOSE = new Task() {
-111  };
-112
-113  private final DelayQueue tasks = new DelayQueue<>();
-114
-115  private final AtomicBoolean closed = new AtomicBoolean(false);
-116
-117  @VisibleForTesting
-118  ZooKeeper zookeeper;
-119
-120  private int pendingRequests = 0;
-121
-122  private String getId() {
-123return String.format("0x%08x", System.identityHashCode(this));
-124  }
-125
-126  public ReadOnlyZKClient(Configuration conf) {
-127// We might use a different ZK for client access
-128String clientZkQuorumServers = ZKConfig.getClientZKQuorumServersString(conf);
-129if (clientZkQuorumServers != null) {
-130  this.connectString = clientZkQuorumServers;
-131} else {
-132  this.connectString = ZKConfig.getZKQuorumServersString(conf);
-133}
-134this.sessionTimeoutMs = conf.getInt(ZK_SESSION_TIMEOUT, DEFAULT_ZK_SESSION_TIMEOUT);
-135this.maxRetries = conf.getInt(RECOVERY_RETRY, DEFAULT_RECOVERY_RETRY);
-136this.retryIntervalMs =
-137conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS);
-138this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS);
-139LOG.debug(
-140  "Connect {} to {} with session timeout={}ms, retries {}, " +
-141"retry interval {}ms, keepAlive={}ms",
-142  getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, keepAliveTimeMs);
-143Threads.setDaemonThreadRunning(new Thread(this::run),
-144  "ReadOnlyZKClient-" + connectString + "@" + getId());
-145  }
-146
-147  private abstract class ZKTask extends Task {
-148
-149protected final String path;
-150
-151private final CompletableFuture 
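
Beyond the quorum change, the quoted class is a compact illustration of a single-threaded scheduler built on DelayQueue: every operation is a Delayed Task ordered by a nanoTime deadline, with an identityHashCode tie-break so compareTo stays total. The same pattern in miniature, using only the JDK (independent of HBase):

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

// Minimal sketch of the Task-on-DelayQueue pattern from the class above.
public class DelayQueueSketch {
  static class Task implements Delayed {
    final long time = System.nanoTime(); // deadline: run as soon as possible
    final Runnable body;

    Task(Runnable body) { this.body = body; }

    @Override
    public long getDelay(TimeUnit unit) {
      return unit.convert(time - System.nanoTime(), TimeUnit.NANOSECONDS);
    }

    @Override
    public int compareTo(Delayed o) {
      Task that = (Task) o;
      int c = Long.compare(time, that.time);
      // identityHashCode tie-break keeps the ordering consistent
      return c != 0 ? c : Integer.compare(System.identityHashCode(this),
          System.identityHashCode(that));
    }
  }

  public static void main(String[] args) throws InterruptedException {
    DelayQueue<Task> tasks = new DelayQueue<>();
    tasks.put(new Task(() -> System.out.println("first")));
    tasks.put(new Task(() -> System.out.println("second")));
    // Consumer loop: take() blocks until the head's delay has expired.
    while (!tasks.isEmpty()) {
      tasks.take().body.run();
    }
  }
}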

[06/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
index 3d7d280..82373f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
@@ -269,462 +269,467 @@
 261
 262  @Override
 263  public CompletableFuture get(Get get) {
-264CompletableFuture primaryFuture =
-265  get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
-266if (get.getConsistency() == Consistency.STRONG) {
-267  return primaryFuture;
-268}
-269// Timeline consistent read, where we will send requests to other region replicas
-270CompletableFuture future = new CompletableFuture<>();
-271connect(primaryFuture, future);
-272long primaryCallTimeoutNs = conn.connConf.getPrimaryCallTimeoutNs();
-273long startNs = System.nanoTime();
-274addListener(conn.getLocator().getRegionLocations(tableName, get.getRow(),
-275  RegionLocateType.CURRENT, false, readRpcTimeoutNs), (locs, error) -> {
-276if (error != null) {
-277  LOG.warn(
-278"Failed to locate all the replicas for table={}, row='{}'," +
-279  " give up timeline consistent read",
-280tableName, Bytes.toStringBinary(get.getRow()), error);
-281  return;
-282}
-283if (locs.size() <= 1) {
-284  LOG.warn(
-285"There are no secondary replicas for region {}," + " give up timeline consistent read",
-286locs.getDefaultRegionLocation().getRegion());
-287  return;
-288}
-289long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
-290if (delayNs <= 0) {
-291  timelineConsistentGet(get, locs, future);
-292} else {
-293  AsyncConnectionImpl.RETRY_TIMER.newTimeout(
-294timeout -> timelineConsistentGet(get, locs, future), delayNs, TimeUnit.NANOSECONDS);
-295}
-296  });
-297return future;
-298  }
-299
-300  @Override
-301  public CompletableFuture put(Put put) {
-302return this. newCaller(put, writeRpcTimeoutNs)
-303  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, stub,
-304put, RequestConverter::buildMutateRequest))
-305  .call();
-306  }
-307
-308  @Override
-309  public CompletableFuture delete(Delete delete) {
-310return this. newCaller(delete, writeRpcTimeoutNs)
-311  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc,
-312stub, delete, RequestConverter::buildMutateRequest))
-313  .call();
-314  }
-315
-316  @Override
-317  public CompletableFuture append(Append append) {
-318checkHasFamilies(append);
-319return this. newCaller(append, rpcTimeoutNs)
-320  .action((controller, loc, stub) -> this. noncedMutate(controller, loc, stub,
-321append, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-322  .call();
-323  }
-324
-325  @Override
-326  public CompletableFuture increment(Increment increment) {
-327checkHasFamilies(increment);
-328return this. newCaller(increment, rpcTimeoutNs)
-329  .action((controller, loc, stub) -> this. noncedMutate(controller, loc,
-330stub, increment, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-331  .call();
-332  }
-333
-334  private final class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder {
-335
-336private final byte[] row;
-337
-338private final byte[] family;
-339
-340private byte[] qualifier;
-341
-342private TimeRange timeRange;
-343
-344private CompareOperator op;
-345
-346private byte[] value;
-347
-348public CheckAndMutateBuilderImpl(byte[] row, byte[] family) {
-349  this.row = Preconditions.checkNotNull(row, "row is null");
-350  this.family = Preconditions.checkNotNull(family, "family is null");
-351}
+264if (get.getConsistency() == Consistency.STRONG) {
+265  return get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
+266}
+267// user specifies a replica id explicitly, just send request to the specific replica
+268if (get.getReplicaId() >= 0) {
+269  return get(get, get.getReplicaId(), readRpcTimeoutNs);
+270}
+271
+272// Timeline consistent read, where we may send requests to other region replicas
+273CompletableFuture primaryFuture =
+274  get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
+27

[07/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.RpcCall.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.RpcCall.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.RpcCall.html
index 3d7d280..82373f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.RpcCall.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTableImpl.RpcCall.html
@@ -269,462 +269,467 @@
 261
 262  @Override
 263  public CompletableFuture get(Get get) {
-264CompletableFuture primaryFuture =
-265  get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
-266if (get.getConsistency() == Consistency.STRONG) {
-267  return primaryFuture;
-268}
-269// Timeline consistent read, where we will send requests to other region replicas
-270CompletableFuture future = new CompletableFuture<>();
-271connect(primaryFuture, future);
-272long primaryCallTimeoutNs = conn.connConf.getPrimaryCallTimeoutNs();
-273long startNs = System.nanoTime();
-274addListener(conn.getLocator().getRegionLocations(tableName, get.getRow(),
-275  RegionLocateType.CURRENT, false, readRpcTimeoutNs), (locs, error) -> {
-276if (error != null) {
-277  LOG.warn(
-278"Failed to locate all the replicas for table={}, row='{}'," +
-279  " give up timeline consistent read",
-280tableName, Bytes.toStringBinary(get.getRow()), error);
-281  return;
-282}
-283if (locs.size() <= 1) {
-284  LOG.warn(
-285"There are no secondary replicas for region {}," + " give up timeline consistent read",
-286locs.getDefaultRegionLocation().getRegion());
-287  return;
-288}
-289long delayNs = primaryCallTimeoutNs - (System.nanoTime() - startNs);
-290if (delayNs <= 0) {
-291  timelineConsistentGet(get, locs, future);
-292} else {
-293  AsyncConnectionImpl.RETRY_TIMER.newTimeout(
-294timeout -> timelineConsistentGet(get, locs, future), delayNs, TimeUnit.NANOSECONDS);
-295}
-296  });
-297return future;
-298  }
-299
-300  @Override
-301  public CompletableFuture put(Put put) {
-302return this. newCaller(put, writeRpcTimeoutNs)
-303  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc, stub,
-304put, RequestConverter::buildMutateRequest))
-305  .call();
-306  }
-307
-308  @Override
-309  public CompletableFuture delete(Delete delete) {
-310return this. newCaller(delete, writeRpcTimeoutNs)
-311  .action((controller, loc, stub) -> RawAsyncTableImpl. voidMutate(controller, loc,
-312stub, delete, RequestConverter::buildMutateRequest))
-313  .call();
-314  }
-315
-316  @Override
-317  public CompletableFuture append(Append append) {
-318checkHasFamilies(append);
-319return this. newCaller(append, rpcTimeoutNs)
-320  .action((controller, loc, stub) -> this. noncedMutate(controller, loc, stub,
-321append, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-322  .call();
-323  }
-324
-325  @Override
-326  public CompletableFuture increment(Increment increment) {
-327checkHasFamilies(increment);
-328return this. newCaller(increment, rpcTimeoutNs)
-329  .action((controller, loc, stub) -> this. noncedMutate(controller, loc,
-330stub, increment, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
-331  .call();
-332  }
-333
-334  private final class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder {
-335
-336private final byte[] row;
-337
-338private final byte[] family;
-339
-340private byte[] qualifier;
-341
-342private TimeRange timeRange;
-343
-344private CompareOperator op;
-345
-346private byte[] value;
-347
-348public CheckAndMutateBuilderImpl(byte[] row, byte[] family) {
-349  this.row = Preconditions.checkNotNull(row, "row is null");
-350  this.family = Preconditions.checkNotNull(family, "family is null");
-351}
+264if (get.getConsistency() == Consistency.STRONG) {
+265  return get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
+266}
+267// user specifies a replica id explicitly, just send request to the specific replica
+268if (get.getReplicaId() >= 0) {
+269  return get(get, get.getReplicaId(), readRpcTimeoutNs);
+270}
+271
+272// Timeline consistent read, where we may send requests to other region replicas
+273CompletableFuture primaryFuture =
+274  get(get, RegionReplicaUtil

[02/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 69ddfee..98e959f 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -18410,6 +18410,8 @@
  
 getPrimaryCdl() - Static method in class org.apache.hadoop.hbase.client.TestReplicasClient.SlowMeCopro
  
+getPrimaryGetCount() - Static method in class org.apache.hadoop.hbase.client.TestAsyncTableRegionReplicasGet
+ 
 getPrimaryRegions() - Method in class org.apache.hadoop.hbase.master.assignment.TestAssignmentManagerUtil
  
 getPrincipalForTesting() - Static method in class org.apache.hadoop.hbase.security.HBaseKerberosUtils
@@ -19144,6 +19146,8 @@
  
 getSecondaryCdl() - Static method in class org.apache.hadoop.hbase.client.TestReplicasClient.SlowMeCopro
  
+getSecondaryGetCount() - Static method in class org.apache.hadoop.hbase.client.TestAsyncTableRegionReplicasGet
+ 
 getSecondaryRS() - Method in class org.apache.hadoop.hbase.regionserver.TestRegionReplicasAreDistributed
  
 getSecondaryRS() - Method in class org.apache.hadoop.hbase.regionserver.TestRegionReplicasWithModifyTable
@@ -32018,8 +32022,6 @@
  
 PRIMARY - Static variable in class org.apache.hadoop.hbase.master.TestRegionPlacement2
  
-PRIMARY_GET_COUNT - Static variable in class org.apache.hadoop.hbase.client.TestAsyncTableRegionReplicasGet
- 
 PRIMARY_TIMEOUT_DEFAULT - Static variable in class org.apache.hadoop.hbase.IntegrationTestRegionReplicaPerf
  
 PRIMARY_TIMEOUT_KEY - Static variable in class org.apache.hadoop.hbase.IntegrationTestRegionReplicaPerf
@@ -34246,10 +34248,14 @@
  
 replayEdit(HRegion, WAL.Entry) - Static method in class org.apache.hadoop.hbase.regionserver.TestHRegionReplayEvents
  
+REPLICA_COUNT - Static variable in class org.apache.hadoop.hbase.client.TestAsyncTableRegionReplicasGet
+ 
 REPLICA_COUNT_DEFAULT - Static variable in class org.apache.hadoop.hbase.IntegrationTestRegionReplicaPerf
  
 REPLICA_COUNT_KEY - Static variable in class org.apache.hadoop.hbase.IntegrationTestRegionReplicaPerf
  
+REPLICA_ID_TO_COUNT - Static variable in class org.apache.hadoop.hbase.client.TestAsyncTableRegionReplicasGet
+ 
 replicaCalls - Variable in class org.apache.hadoop.hbase.client.TestAsyncProcess.MyAsyncProcessWithReplicas
  
 replicaCount - Variable in class org.apache.hadoop.hbase.IntegrationTestRegionReplicaPerf
@@ -36842,8 +36848,6 @@
  
 SECONDARY - Static variable in class org.apache.hadoop.hbase.master.TestRegionPlacement2
  
-SECONDARY_GET_COUNT - Static variable in class org.apache.hadoop.hbase.client.TestAsyncTableRegionReplicasGet
- 
 secondaryAndTertiaryRSPlacementHelper(int, Map) - Method in class org.apache.hadoop.hbase.favored.TestFavoredNodeAssignmentHelper
  
 secondaryCdl - Static variable in class org.apache.hadoop.hbase.client.TestReplicasClient.SlowMeCopro
@@ -60140,6 +60144,8 @@
  
 testReadSnapshotRegionManifest() - Method in class org.apache.hadoop.hbase.snapshot.TestSnapshotManifest
  
+testReadSpecificReplica() - Method in class org.apache.hadoop.hbase.client.TestAsyncTableRegionReplicasGet
+ 
 testReadTableTimeouts() - Method in class org.apache.hadoop.hbase.tool.TestCanaryTool
  
 testReadWithFilter() - Method in class org.apache.hadoop.hbase.client.TestFromClientSide

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index b726ce9..7e63870 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -146,8 +146,8 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.backup.TestIncrementalBackupMergeWithFailures.FailurePhase
 org.apache.hadoop.hbase.backup.TestBackupDeleteWithFailures.Failure
+org.apache.hadoop.hbase.backup.TestIncrementalBackupMergeWithFailures.FailurePhase
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.FailPrimaryGetCP.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncTableReg

[12/14] hbase-site git commit: Published site at d957f0fa1926c13355c8cca01bbfd7133866e05d.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4a007343/devapidocs/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html b/devapidocs/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
index 7ffda0e..04b9fc8 100644
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
+++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html
[Generated Javadoc diff elided: every hunk on the ReadOnlyZKClient page merely updates
internal line-number anchors after the class was touched. The rendered content -- the
class description "A very simple read only zookeeper implementation without watcher
support" and the signatures of LOG, RECOVERY_RETRY, DEFAULT_RECOVERY_RETRY,
RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS, KEEPALIVE_MILLIS,
DEFAULT_KEEPALIVE_MILLIS, FAIL_FAST_CODES, connectString, sessionTimeoutMs and
maxRetries -- is unchanged.]

[1/2] hbase git commit: HBASE-21588 Procedure v2 wal splitting implementation

Repository: hbase
Updated Branches:
  refs/heads/branch-2 348c2dfe9 -> f02ac310d


http://git-wip-us.apache.org/repos/asf/hbase/blob/f02ac310/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
index af2076e..6751eaf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
@@ -21,6 +21,8 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -44,9 +46,13 @@ import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+@RunWith(Parameterized.class)
 @Category({MasterTests.class, LargeTests.class})
 public class TestServerCrashProcedure {
 
@@ -58,6 +64,9 @@ public class TestServerCrashProcedure {
 
   protected HBaseTestingUtility util;
 
+  @Parameter
+  public boolean splitWALCoordinatedByZK;
+
   private ProcedureMetrics serverCrashProcMetrics;
   private long serverCrashSubmittedCount = 0;
   private long serverCrashFailedCount = 0;
@@ -67,6 +76,10 @@ public class TestServerCrashProcedure {
 conf.set("hbase.balancer.tablesOnMaster", "none");
 conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
 conf.setInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER, 3);
+conf.setBoolean("hbase.split.writer.creation.bounded", true);
+conf.setInt("hbase.regionserver.hlog.splitlog.writer.threads", 8);
+LOG.info("WAL splitting coordinated by zk? {}", splitWALCoordinatedByZK);
+conf.setBoolean(HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK, splitWALCoordinatedByZK);
   }
 
   @Before
@@ -173,7 +186,8 @@ public class TestServerCrashProcedure {
 
   @Test
   public void testConcurrentSCPForSameServer() throws Exception {
-final TableName tableName = TableName.valueOf("testConcurrentSCPForSameServer");
+final TableName tableName =
+TableName.valueOf("testConcurrentSCPForSameServer-" + splitWALCoordinatedByZK);
 try (Table t = createTable(tableName)) {
   // Load the table with a bit of data so some logs to split and some edits in each region.
   this.util.loadTable(t, HBaseTestingUtility.COLUMNS[0]);
@@ -222,4 +236,9 @@ public class TestServerCrashProcedure {
 serverCrashSubmittedCount = serverCrashProcMetrics.getSubmittedCounter().getCount();
 serverCrashFailedCount = serverCrashProcMetrics.getFailedCounter().getCount();
   }
+
+  @Parameterized.Parameters
+  public static Collection<Boolean> coordinatedByZK() {
+return Arrays.asList(false, true);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f02ac310/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java
new file mode 100644
index 000..5c801c5
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
+import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER;
+
+import java.ut

[2/2] hbase git commit: HBASE-21588 Procedure v2 wal splitting implementation

HBASE-21588 Procedure v2 wal splitting implementation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f02ac310
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f02ac310
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f02ac310

Branch: refs/heads/branch-2
Commit: f02ac310d249fb972705cd6621dfe495512d2d98
Parents: 348c2df
Author: Jingyun Tian 
Authored: Tue Jan 8 09:49:13 2019 +0800
Committer: Jingyun Tian 
Committed: Tue Jan 8 17:26:58 2019 +0800

--
 .../org/apache/hadoop/hbase/HConstants.java |   8 +
 .../src/main/protobuf/MasterProcedure.proto |  25 ++
 .../SplitLogWorkerCoordination.java |   3 -
 .../ZkSplitLogWorkerCoordination.java   |   6 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  18 +
 .../hadoop/hbase/master/MasterServices.java |   7 +
 .../hadoop/hbase/master/MasterWalManager.java   |   6 +-
 .../hadoop/hbase/master/SplitWALManager.java| 239 
 .../master/procedure/ServerCrashProcedure.java  |  76 +++-
 .../procedure/ServerProcedureInterface.java |  13 +-
 .../hbase/master/procedure/ServerQueue.java |   2 +
 .../master/procedure/SplitWALProcedure.java | 199 ++
 .../procedure/SplitWALRemoteProcedure.java  | 195 ++
 .../hbase/regionserver/HRegionServer.java   |  18 +-
 .../hbase/regionserver/SplitLogWorker.java  |  99 ++---
 .../hbase/regionserver/SplitWALCallable.java| 109 ++
 .../hadoop/hbase/master/AbstractTestDLS.java|   3 +-
 .../hadoop/hbase/master/TestRestartCluster.java |  21 +
 .../hadoop/hbase/master/TestRollingRestart.java |  18 +-
 .../hbase/master/TestSplitWALManager.java   | 383 +++
 .../procedure/TestServerCrashProcedure.java |  21 +-
 .../master/procedure/TestSplitWALProcedure.java | 133 +++
 .../hbase/regionserver/TestSplitLogWorker.java  |   5 +-
 23 files changed, 1537 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f02ac310/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 7aa1494..bc184e5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1347,6 +1347,14 @@ public final class HConstants {
   public static final String HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL =
   "hbase.client.fast.fail.interceptor.impl";
 
+  public static final String HBASE_SPLIT_WAL_COORDINATED_BY_ZK = "hbase.split.wal.zk.coordinated";
+
+  public static final boolean DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK = true;
+
+  public static final String HBASE_SPLIT_WAL_MAX_SPLITTER = "hbase.regionserver.wal.max.splitters";
+
+  public static final int DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER = 2;
+
   /** Config key for if the server should send backpressure and if the client should listen to
    * that backpressure from the server */
   public static final String ENABLE_CLIENT_BACKPRESSURE = "hbase.client.backpressure.enabled";

http://git-wip-us.apache.org/repos/asf/hbase/blob/f02ac310/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index f96859c..1901282 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -308,6 +308,8 @@ enum ServerCrashState {
   SERVER_CRASH_WAIT_ON_ASSIGN = 9;
   SERVER_CRASH_SPLIT_META_LOGS = 10;
   SERVER_CRASH_ASSIGN_META = 11;
+  SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR=12;
+  SERVER_CRASH_DELETE_SPLIT_WALS_DIR=13;
   SERVER_CRASH_HANDLE_RIT2 = 20[deprecated=true];
   SERVER_CRASH_FINISH = 100;
 }
@@ -502,4 +504,27 @@ message SwitchRpcThrottleStateData {
 message SwitchRpcThrottleRemoteStateData {
   required ServerName target_server = 1;
   required bool rpc_throttle_enabled = 2;
+}
+
+message SplitWALParameter {
+  required string wal_path = 1;
+}
+
+
+message SplitWALData {
+  required string wal_path = 1;
+  required ServerName crashed_server = 2;
+  optional ServerName worker = 3;
+}
+
+message SplitWALRemoteData {
+  required string wal_path = 1;
+  required ServerName crashed_server = 2;
+  required ServerName worker = 3;
+}
+
+enum SplitWALState {
+  ACQUIRE_SPLIT_WAL_WORKER = 1;
+  DISPATCH_WAL_TO_WORKER = 2;
+  RELEASE_SPLIT_WORKER = 3;
 }
\ No newline at end of file
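
The enum above implies a simple master-side flow: acquire an idle worker, dispatch
the WAL to it through a remote procedure, then release the worker. A stripped-down
illustration of that loop (this is not the real SplitWALProcedure from the patch;
the helper methods here are hypothetical placeholders):

enum SplitWALState { ACQUIRE_SPLIT_WAL_WORKER, DISPATCH_WAL_TO_WORKER, RELEASE_SPLIT_WORKER }

final class SplitWalFlowSketch {
  private SplitWALState state = SplitWALState.ACQUIRE_SPLIT_WAL_WORKER;

  void run(String walPath) {
    while (true) {
      switch (state) {
        case ACQUIRE_SPLIT_WAL_WORKER:
          acquireWorker();            // pick a region server with free splitter slots
          state = SplitWALState.DISPATCH_WAL_TO_WORKER;
          break;
        case DISPATCH_WAL_TO_WORKER:
          dispatch(walPath);          // remote procedure performs the actual split
          state = SplitWALState.RELEASE_SPLIT_WORKER;
          break;
        case RELEASE_SPLIT_WORKER:
          releaseWorker();            // free the worker for the next WAL
          return;
      }
    }
  }

  private void acquireWorker() { /* hypothetical */ }
  private void dispatch(String walPath) { /* hypothetical */ }
  private void releaseWorker() { /* hypothetical */ }
}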

http://git-wip-us.apache.org/repos/asf/hbase/blob/f02ac310/hbas

[09/15] hbase git commit: HBASE-21683 Reset readsEnabled flag after successfully flushing the primary region

HBASE-21683 Reset readsEnabled flag after successfully flushing the primary region


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e9dae2ad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e9dae2ad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e9dae2ad

Branch: refs/heads/HBASE-21512
Commit: e9dae2adc381550eb3e2b65be246309b309e7f06
Parents: 7377fcd
Author: zhangduo 
Authored: Mon Jan 7 20:09:20 2019 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 16:06:34 2019 +0800

--
 .../hbase/regionserver/handler/RegionReplicaFlushHandler.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e9dae2ad/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
index b917379..81b6d7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
@@ -145,6 +145,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
             + " of region " + region.getRegionInfo().getEncodedName()
             + " Now waiting and blocking reads until observing a full flush cycle");
         }
+        region.setReadsEnabled(true);
         break;
       } else {
         if (response.hasWroteFlushWalMarker()) {
@@ -156,6 +157,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
               + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and "
               + "blocking reads until observing a flush marker");
         }
+        region.setReadsEnabled(true);
         break;
       } else {
         // somehow we were not able to get the primary to write the flush request. It may be



[05/15] hbase git commit: HBASE-21362: Disable printing of stack-trace in shell when quotas are violated

HBASE-21362: Disable printing of stack-trace in shell when quotas are violated

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a735a475
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a735a475
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a735a475

Branch: refs/heads/HBASE-21512
Commit: a735a4753250d9be6b5371f08ca1db8f4cede0ab
Parents: 84c1f08
Author: Sakthi 
Authored: Wed Dec 26 17:01:22 2018 -0800
Committer: Josh Elser 
Committed: Mon Jan 7 16:36:48 2019 -0500

--
 hbase-shell/src/main/ruby/shell/commands.rb | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a735a475/hbase-shell/src/main/ruby/shell/commands.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index d60d07c..be6dd85 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -169,6 +169,16 @@ module Shell
   strs = str.split("\n")
   raise (strs[0]).to_s unless strs.empty?
 end
+if cause.is_a?(org.apache.hadoop.hbase.quotas.SpaceLimitingException)
+  strs = cause.message.split("\n")
+  raise(strs[0]).to_s unless strs.empty?
+end
+if cause.is_a?(org.apache.hadoop.hbase.client.RetriesExhaustedException)
+  str = cause.cause.to_s
+  regex = /.*RpcThrottlingException: (?<message>[^\n]+).*/
+  error = regex.match(str)
+  raise error[:message].capitalize unless error.nil?
+end
 
 # Throw the other exception which hasn't been handled above
 raise cause
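
The Ruby branch relies on a named capture group to surface only the human-readable
part of the throttling error. The same extraction technique in Java, shown purely
for illustration (not part of the patch):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ThrottleMessageSketch {
  public static void main(String[] args) {
    String str = "org.apache.hadoop.hbase.quotas.RpcThrottlingException: "
        + "write size limit exceeded - wait 10sec\nat ...";
    // Same idea as the Ruby regex: capture everything after the exception
    // name up to the first newline into a named group.
    Pattern regex =
        Pattern.compile(".*RpcThrottlingException: (?<message>[^\\n]+).*", Pattern.DOTALL);
    Matcher m = regex.matcher(str);
    if (m.matches()) {
      System.out.println(m.group("message")); // write size limit exceeded - wait 10sec
    }
  }
}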



[03/15] hbase git commit: HBASE-21588 Procedure v2 wal splitting implementation

HBASE-21588 Procedure v2 wal splitting implementation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/281d6429
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/281d6429
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/281d6429

Branch: refs/heads/HBASE-21512
Commit: 281d6429e55149cc4c05430dcc1d1dc136d8b245
Parents: 77db1fa
Author: tianjingyun 
Authored: Sun Jan 6 01:31:59 2019 +0800
Committer: Jingyun Tian 
Committed: Mon Jan 7 15:58:15 2019 +0800

--
 .../org/apache/hadoop/hbase/HConstants.java |   8 +
 .../src/main/protobuf/MasterProcedure.proto |  25 ++
 .../SplitLogWorkerCoordination.java |   3 -
 .../ZkSplitLogWorkerCoordination.java   |   6 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  18 +
 .../hadoop/hbase/master/MasterServices.java |   7 +
 .../hadoop/hbase/master/MasterWalManager.java   |   6 +-
 .../hadoop/hbase/master/SplitWALManager.java| 239 
 .../master/procedure/ServerCrashProcedure.java  |  76 +++-
 .../procedure/ServerProcedureInterface.java |  13 +-
 .../hbase/master/procedure/ServerQueue.java |   2 +
 .../master/procedure/SplitWALProcedure.java | 199 ++
 .../procedure/SplitWALRemoteProcedure.java  | 195 ++
 .../hbase/regionserver/HRegionServer.java   |  16 +-
 .../hbase/regionserver/SplitLogWorker.java  |  10 +-
 .../hbase/regionserver/SplitWALCallable.java| 109 ++
 .../hadoop/hbase/master/AbstractTestDLS.java|   3 +-
 .../hadoop/hbase/master/TestRestartCluster.java |  21 +
 .../hadoop/hbase/master/TestRollingRestart.java |  18 +-
 .../hbase/master/TestSplitWALManager.java   | 383 +++
 .../procedure/TestServerCrashProcedure.java |  21 +-
 .../master/procedure/TestSplitWALProcedure.java | 133 +++
 .../hbase/regionserver/TestSplitLogWorker.java  |   5 +-
 23 files changed, 1491 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/281d6429/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index fdc3d82..75ee687 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1311,6 +1311,14 @@ public final class HConstants {
   public static final String HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL =
   "hbase.client.fast.fail.interceptor.impl";
 
+  public static final String HBASE_SPLIT_WAL_COORDINATED_BY_ZK = "hbase.split.wal.zk.coordinated";
+
+  public static final boolean DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK = true;
+
+  public static final String HBASE_SPLIT_WAL_MAX_SPLITTER = "hbase.regionserver.wal.max.splitters";
+
+  public static final int DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER = 2;
+
   /** Config key for if the server should send backpressure and if the client should listen to
    * that backpressure from the server */
   public static final String ENABLE_CLIENT_BACKPRESSURE = "hbase.client.backpressure.enabled";

http://git-wip-us.apache.org/repos/asf/hbase/blob/281d6429/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index b365373..59af722 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -308,6 +308,8 @@ enum ServerCrashState {
   SERVER_CRASH_WAIT_ON_ASSIGN = 9;
   SERVER_CRASH_SPLIT_META_LOGS = 10;
   SERVER_CRASH_ASSIGN_META = 11;
+  SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR=12;
+  SERVER_CRASH_DELETE_SPLIT_WALS_DIR=13;
   SERVER_CRASH_HANDLE_RIT2 = 20[deprecated=true];
   SERVER_CRASH_FINISH = 100;
 }
@@ -565,4 +567,27 @@ message SwitchRpcThrottleStateData {
 message SwitchRpcThrottleRemoteStateData {
   required ServerName target_server = 1;
   required bool rpc_throttle_enabled = 2;
+}
+
+message SplitWALParameter {
+  required string wal_path = 1;
+}
+
+
+message SplitWALData{
+  required string wal_path = 1;
+  required ServerName crashed_server=2;
+  optional ServerName worker = 3;
+}
+
+message SplitWALRemoteData{
+  required string wal_path = 1;
+  required ServerName crashed_server=2;
+  required ServerName worker = 3;
+}
+
+enum SplitWALState{
+  ACQUIRE_SPLIT_WAL_WORKER = 1;
+  DISPATCH_WAL_TO_WORKER = 2;
+  RELEASE_SPLIT_WORKER = 3;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/281d6429/hbase-server

[07/15] hbase git commit: HBASE-21682 Support getting from specific replica

HBASE-21682 Support getting from specific replica


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4f0514e3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4f0514e3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4f0514e3

Branch: refs/heads/HBASE-21512
Commit: 4f0514e39aeed5aa12c0399faedbed7298a975c7
Parents: 5aaa734
Author: zhangduo 
Authored: Mon Jan 7 20:34:01 2019 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 09:49:12 2019 +0800

--
 .../hadoop/hbase/client/RawAsyncTableImpl.java  | 13 +++--
 .../client/TestAsyncTableRegionReplicasGet.java | 60 ++--
 2 files changed, 51 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4f0514e3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
index 28db7e8..2ab9f6a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
@@ -261,12 +261,17 @@ class RawAsyncTableImpl implements AsyncTable<AdvancedScanResultConsumer> {
 
   @Override
   public CompletableFuture<Result> get(Get get) {
-    CompletableFuture<Result> primaryFuture =
-      get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
     if (get.getConsistency() == Consistency.STRONG) {
-      return primaryFuture;
+      return get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
+    }
+    // user specifies a replica id explicitly, just send request to the specific replica
+    if (get.getReplicaId() >= 0) {
+      return get(get, get.getReplicaId(), readRpcTimeoutNs);
     }
-    // Timeline consistent read, where we will send requests to other region replicas
+
+    // Timeline consistent read, where we may send requests to other region replicas
+    CompletableFuture<Result> primaryFuture =
+      get(get, RegionReplicaUtil.DEFAULT_REPLICA_ID, readRpcTimeoutNs);
     CompletableFuture<Result> future = new CompletableFuture<>();
     connect(primaryFuture, future);
     long primaryCallTimeoutNs = conn.connConf.getPrimaryCallTimeoutNs();
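
With this change a caller can pin a read to one replica instead of fanning out. A
usage sketch against the async client (assumes an already-open AsyncConnection and
a table named "test" that has region replicas):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplicaGetSketch {
  static CompletableFuture<Result> readFromReplicaOne(AsyncConnection conn) {
    AsyncTable<?> table = conn.getTable(TableName.valueOf("test"));
    Get get = new Get(Bytes.toBytes("row"));
    get.setConsistency(Consistency.TIMELINE); // allow non-primary replicas
    get.setReplicaId(1);                      // new: send only to replica 1
    return table.get(get);
  }
}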

http://git-wip-us.apache.org/repos/asf/hbase/blob/4f0514e3/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java
index 0445a0e..2117116 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRegionReplicasGet.java
@@ -24,6 +24,8 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ForkJoinPool;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -75,6 +77,8 @@ public class TestAsyncTableRegionReplicasGet {
 
   private static byte[] VALUE = Bytes.toBytes("value");
 
+  private static int REPLICA_COUNT = 3;
+
   private static AsyncConnection ASYNC_CONN;
 
   @Rule
@@ -99,9 +103,8 @@ public class TestAsyncTableRegionReplicasGet {
 
   private static volatile boolean FAIL_PRIMARY_GET = false;
 
-  private static AtomicInteger PRIMARY_GET_COUNT = new AtomicInteger(0);
-
-  private static AtomicInteger SECONDARY_GET_COUNT = new AtomicInteger(0);
+  private static ConcurrentMap<Integer, AtomicInteger> REPLICA_ID_TO_COUNT =
+    new ConcurrentHashMap<>();
 
  public static final class FailPrimaryGetCP implements RegionObserver, RegionCoprocessor {
 
@@ -117,13 +120,10 @@ public class TestAsyncTableRegionReplicasGet {
   if (!region.getTable().equals(TABLE_NAME)) {
 return;
   }
-  if (region.getReplicaId() != RegionReplicaUtil.DEFAULT_REPLICA_ID) {
-SECONDARY_GET_COUNT.incrementAndGet();
-  } else {
-PRIMARY_GET_COUNT.incrementAndGet();
-if (FAIL_PRIMARY_GET) {
-  throw new IOException("Inject error");
-}
+      REPLICA_ID_TO_COUNT.computeIfAbsent(region.getReplicaId(), k -> new AtomicInteger())
+        .incrementAndGet();
+      if (region.getRegionId() == RegionReplicaUtil.DEFAULT_REPLICA_ID && FAIL_PRIMARY_GET) {
+        throw new IOException("Inject error");
   }
 }
   }
@@ -152,10 +152,9 @@ public class TestAsyncTableRe
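
The replacement of two fixed counters with a map keyed by replica id uses the
standard lazy per-key counter idiom; in isolation (illustration only):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class PerReplicaCounterSketch {
  private static final ConcurrentMap<Integer, AtomicInteger> REPLICA_ID_TO_COUNT =
      new ConcurrentHashMap<>();

  static void recordGet(int replicaId) {
    // Creates the counter on first use, atomically, then bumps it.
    REPLICA_ID_TO_COUNT.computeIfAbsent(replicaId, k -> new AtomicInteger()).incrementAndGet();
  }

  public static void main(String[] args) {
    recordGet(0);
    recordGet(1);
    recordGet(1);
    System.out.println(REPLICA_ID_TO_COUNT); // {0=1, 1=2}
  }
}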

[08/15] hbase git commit: Revert "HBASE-21668 SCM fetch times out for nightlies"

Revert "HBASE-21668 SCM fetch times out for nightlies"

This reverts commit 84c1f0887d7b7968e1760f15e3d12a3f80fc87bc.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7377fcd2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7377fcd2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7377fcd2

Branch: refs/heads/HBASE-21512
Commit: 7377fcd29bf45208214973547facf4853620fba8
Parents: 4f0514e
Author: Peter Somogyi 
Authored: Tue Jan 8 08:46:46 2019 +0100
Committer: Peter Somogyi 
Committed: Tue Jan 8 08:46:46 2019 +0100

--
 dev-support/Jenkinsfile | 49 +++-
 1 file changed, 7 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7377fcd2/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -58,16 +58,9 @@ pipeline {
   stages {
 stage ('scm-checkout') {
   steps {
-dir('component') {
-  checkout([
-$class: 'GitSCM',
-branches: scm.branches,
-extensions: scm.extensions + [
-  [$class: 'CleanBeforeCheckout'],
-  [$class: 'CloneOption', honorRefspec: true, noTags: true, reference: '', shallow: true, depth: 30]],
-userRemoteConfigs: scm.userRemoteConfigs
-  ])
-}
+dir('component') {
+  checkout scm
+}
   }
 }
 stage ('thirdparty installs') {
@@ -210,14 +203,7 @@ pipeline {
 unstash 'yetus'
 // since we have a new node definition we need to re-do the scm checkout
 dir('component') {
-  checkout([
-$class: 'GitSCM',
-branches: scm.branches,
-extensions: scm.extensions + [
-  [$class: 'CleanBeforeCheckout'],
-  [$class: 'CloneOption', honorRefspec: true, noTags: true, reference: '', shallow: true, depth: 30]],
-userRemoteConfigs: scm.userRemoteConfigs
-  ])
+  checkout scm
 }
 sh '''#!/usr/bin/env bash
   set -e
@@ -284,14 +270,7 @@ pipeline {
 '''
 unstash 'yetus'
 dir('component') {
-  checkout([
-$class: 'GitSCM',
-branches: scm.branches,
-extensions: scm.extensions + [
-  [$class: 'CleanBeforeCheckout'],
-  [$class: 'CloneOption', honorRefspec: true, noTags: true, reference: '', shallow: true, depth: 30]],
-userRemoteConfigs: scm.userRemoteConfigs
-  ])
+  checkout scm
 }
 sh '''#!/usr/bin/env bash
   set -e
@@ -371,14 +350,7 @@ pipeline {
 '''
 unstash 'yetus'
 dir('component') {
-  checkout([
-$class: 'GitSCM',
-branches: scm.branches,
-extensions: scm.extensions + [
-  [$class: 'CleanBeforeCheckout'],
-  [$class: 'CloneOption', honorRefspec: true, noTags: true, reference: '', shallow: true, depth: 30]],
-userRemoteConfigs: scm.userRemoteConfigs
-  ])
+  checkout scm
 }
 sh '''#!/usr/bin/env bash
   set -e
@@ -465,14 +437,7 @@ pipeline {
 '''
 unstash 'yetus'
 dir('component') {
-  checkout([
-$class: 'GitSCM',
-branches: scm.branches,
-extensions: scm.extensions + [
-  [$class: 'CleanBeforeCheckout'],
-  [$class: 'CloneOption', honorRefspec: true, noTags: true, reference: '', shallow: true, depth: 30]],
-userRemoteConfigs: scm.userRemoteConfigs
-  ])
+  checkout scm
 }
 sh '''#!/usr/bin/env bash
   set -e



[10/15] hbase git commit: HBASE-21684 Throw DNRIOE when connection or rpc client is closed

HBASE-21684 Throw DNRIOE when connection or rpc client is closed


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d957f0fa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d957f0fa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d957f0fa

Branch: refs/heads/HBASE-21512
Commit: d957f0fa1926c13355c8cca01bbfd7133866e05d
Parents: e9dae2a
Author: zhangduo 
Authored: Mon Jan 7 20:00:19 2019 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 16:40:40 2019 +0800

--
 .../apache/hadoop/hbase/ipc/StoppedRpcClientException.java| 4 ++--
 .../org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java   | 7 ---
 2 files changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d957f0fa/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java
index d50004e..bd1e101 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java
@@ -17,11 +17,11 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Public
-public class StoppedRpcClientException extends HBaseIOException {
+public class StoppedRpcClientException extends DoNotRetryIOException {
   public StoppedRpcClientException() {
 super();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d957f0fa/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
index 09f8984..3b50870 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
@@ -30,6 +30,7 @@ import java.util.concurrent.Delayed;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
@@ -259,7 +260,7 @@ public final class ReadOnlyZKClient implements Closeable {
 
   public CompletableFuture<byte[]> get(String path) {
     if (closed.get()) {
-      return failed(new IOException("Client already closed"));
+      return failed(new DoNotRetryIOException("Client already closed"));
     }
     CompletableFuture<byte[]> future = new CompletableFuture<>();
     tasks.add(new ZKTask(path, future, "get") {
@@ -275,7 +276,7 @@ public final class ReadOnlyZKClient implements Closeable {
 
   public CompletableFuture<Stat> exists(String path) {
     if (closed.get()) {
-      return failed(new IOException("Client already closed"));
+      return failed(new DoNotRetryIOException("Client already closed"));
     }
     CompletableFuture<Stat> future = new CompletableFuture<>();
     tasks.add(new ZKTask(path, future, "exists") {
@@ -339,7 +340,7 @@ public final class ReadOnlyZKClient implements Closeable {
   }
 }
 closeZk();
-IOException error = new IOException("Client already closed");
+    DoNotRetryIOException error = new DoNotRetryIOException("Client already closed");
 Arrays.stream(tasks.toArray(new Task[0])).forEach(t -> t.closed(error));
 tasks.clear();
   }
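
Retrying callers classify failures by exception type, so re-parenting these errors
under DoNotRetryIOException is what makes an already-closed client fail fast instead
of burning through retries. Schematically (illustration only, not HBase's actual
retry code):

import java.io.IOException;
import org.apache.hadoop.hbase.DoNotRetryIOException;

public class RetryClassificationSketch {
  static boolean shouldRetry(IOException e, int attempt, int maxAttempts) {
    if (e instanceof DoNotRetryIOException) {
      return false; // e.g. closed client - retrying cannot succeed
    }
    return attempt < maxAttempts;
  }
}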



[11/15] hbase git commit: HBASE-21526 Use AsyncClusterConnection in ServerManager for getRsAdmin

HBASE-21526 Use AsyncClusterConnection in ServerManager for getRsAdmin


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6b9346f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6b9346f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6b9346f9

Branch: refs/heads/HBASE-21512
Commit: 6b9346f91f9da98ec1c2e5fdbe5663e704296b99
Parents: 4e8a0b3
Author: zhangduo 
Authored: Thu Dec 6 21:25:34 2018 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 17:00:35 2019 +0800

--
 .../hbase/client/AsyncClusterConnection.java|   6 +
 .../hbase/client/AsyncConnectionImpl.java   |   5 +
 .../hbase/client/AsyncRegionServerAdmin.java| 210 +++
 .../apache/hadoop/hbase/util/FutureUtils.java   |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  15 +-
 .../hadoop/hbase/master/ServerManager.java  |  67 --
 .../master/procedure/RSProcedureDispatcher.java |  44 ++--
 7 files changed, 263 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6b9346f9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index c7dea25..1327fd7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -27,6 +28,11 @@ import org.apache.yetus.audience.InterfaceAudience;
 public interface AsyncClusterConnection extends AsyncConnection {
 
   /**
+   * Get the admin service for the given region server.
+   */
+  AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName);
+
+  /**
* Get the nonce generator for this connection.
*/
   NonceGenerator getNonceGenerator();

http://git-wip-us.apache.org/repos/asf/hbase/blob/6b9346f9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 188e830..4e7f421 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -330,4 +330,9 @@ class AsyncConnectionImpl implements AsyncClusterConnection {
     return new AsyncBufferedMutatorBuilderImpl(connConf, getTableBuilder(tableName, pool),
       RETRY_TIMER);
   }
+
+  @Override
+  public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
+return new AsyncRegionServerAdmin(serverName, this);
+  }
 }
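
A hypothetical caller sketch for the new per-server stub (not from the patch;
getRegionInfo is one of the methods AsyncRegionServerAdmin exposes):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;

public class RegionServerAdminSketch {
  static CompletableFuture<GetRegionInfoResponse> regionInfo(AsyncClusterConnection conn,
      ServerName server, GetRegionInfoRequest request) {
    AsyncRegionServerAdmin rsAdmin = conn.getRegionServerAdmin(server);
    return rsAdmin.getRegionInfo(request); // completes on the RPC's callback
  }
}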

http://git-wip-us.apache.org/repos/asf/hbase/blob/6b9346f9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
new file mode 100644
index 000..9accd89
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.

[01/15] hbase git commit: HBASE-21159 Add shell command to switch throttle on or off [Forced Update!]

Repository: hbase
Updated Branches:
  refs/heads/HBASE-21512 df35a12ce -> 76d1f43f2 (forced update)


HBASE-21159 Add shell command to switch throttle on or off

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/77db1fae
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/77db1fae
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/77db1fae

Branch: refs/heads/HBASE-21512
Commit: 77db1fae090bc20de62d8a86e9816c69dfb97b7a
Parents: 94093e8
Author: meiyi 
Authored: Fri Jan 4 14:43:34 2019 +0800
Committer: Guanghao Zhang 
Committed: Mon Jan 7 14:21:41 2019 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  13 ++
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  13 ++
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  10 ++
 .../hbase/client/ConnectionImplementation.java  |  16 ++
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  25 +++
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  28 +++
 .../client/ShortCircuitMasterConnection.java|  16 ++
 .../src/main/protobuf/Master.proto  |  21 +++
 .../src/main/protobuf/MasterProcedure.proto |  15 ++
 .../hbase/coprocessor/MasterObserver.java   |  36 
 .../apache/hadoop/hbase/executor/EventType.java |   6 +
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../hbase/master/MasterCoprocessorHost.java |  37 
 .../hadoop/hbase/master/MasterRpcServices.java  |  24 +++
 .../procedure/ServerProcedureInterface.java |   2 +-
 .../hbase/master/procedure/ServerQueue.java |   2 +
 .../procedure/SwitchRpcThrottleProcedure.java   | 164 ++
 .../SwitchRpcThrottleRemoteProcedure.java   | 171 +++
 .../hadoop/hbase/quotas/MasterQuotaManager.java |  54 ++
 .../quotas/RegionServerRpcQuotaManager.java |  32 +++-
 .../hadoop/hbase/quotas/RpcThrottleStorage.java |  69 
 .../hbase/regionserver/HRegionServer.java   |   2 +
 .../SwitchRpcThrottleRemoteCallable.java|  62 +++
 .../hbase/security/access/AccessController.java |  12 ++
 .../hbase/client/TestAsyncQuotaAdminApi.java|   9 +
 .../hadoop/hbase/quotas/TestQuotaAdmin.java |  41 +
 .../security/access/TestAccessController.java   |  26 +++
 hbase-shell/src/main/ruby/hbase/quotas.rb   |   4 +
 hbase-shell/src/main/ruby/shell.rb  |   2 +
 .../ruby/shell/commands/disable_rpc_throttle.rb |  40 +
 .../ruby/shell/commands/enable_rpc_throttle.rb  |  40 +
 hbase-shell/src/test/ruby/hbase/quotas_test.rb  |   8 +
 32 files changed, 999 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/77db1fae/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 08b44c9..1d892b2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2796,4 +2796,17 @@ public interface Admin extends Abortable, Closeable {
*/
   void cloneTableSchema(final TableName tableName, final TableName newTableName,
       final boolean preserveSplits) throws IOException;
+
+  /**
+   * Switch the rpc throttle enable state.
+   * @param enable Set to true to enable, false to disable.
+   * @return Previous rpc throttle enabled value
+   */
+  boolean switchRpcThrottle(final boolean enable) throws IOException;
+
+  /**
+   * Get if the rpc throttle is enabled.
+   * @return True if rpc throttle is enabled
+   */
+  boolean isRpcThrottleEnabled() throws IOException;
 }
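
A usage sketch for the two new methods (assumes a reachable cluster and the
hbase-client dependency; illustration only):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RpcThrottleSwitchSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      boolean previous = admin.switchRpcThrottle(false); // disable throttling
      System.out.println("was enabled: " + previous
          + ", now enabled: " + admin.isRpcThrottleEnabled());
    }
  }
}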

http://git-wip-us.apache.org/repos/asf/hbase/blob/77db1fae/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 6bb253a..40ed213 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -1287,4 +1287,17 @@ public interface AsyncAdmin {
*/
   CompletableFuture<Map<ServerName, Boolean>> compactionSwitch(boolean switchState,
       List<String> serverNamesList);
+
+  /**
+   * Switch the rpc throttle enabled state.
+   * @param enable Set to true to enable, false to disable.
+   * @return Previous rpc throttle enabled value
+   */
+  CompletableFuture<Boolean> switchRpcThrottle(boolean enable);
+
+  /**
+   * Get if the rpc throttle is enabled.
+   * @return True if rpc throttle is enabled
+   */
+  C

[12/15] hbase git commit: HBASE-21579 Use AsyncClusterConnection for HBaseInterClusterReplicationEndpoint

HBASE-21579 Use AsyncClusterConnection for HBaseInterClusterReplicationEndpoint


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c7c002e6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c7c002e6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c7c002e6

Branch: refs/heads/HBASE-21512
Commit: c7c002e6a531a9aee96ae483ff30c8da06aafeb5
Parents: 6b9346f
Author: zhangduo 
Authored: Tue Jan 1 21:27:14 2019 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 17:00:35 2019 +0800

--
 .../hbase/client/AsyncRegionServerAdmin.java| 14 +--
 .../hbase/protobuf/ReplicationProtbufUtil.java  | 35 +
 .../HBaseInterClusterReplicationEndpoint.java   | 31 +++
 .../regionserver/ReplicationSinkManager.java| 40 +++-
 .../replication/SyncReplicationTestBase.java| 12 +++---
 .../TestReplicationSinkManager.java | 21 +-
 6 files changed, 74 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c7c002e6/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
index 9accd89..b9141a9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -94,9 +95,9 @@ public class AsyncRegionServerAdmin {
     void call(AdminService.Interface stub, HBaseRpcController controller, RpcCallback<RESP> done);
   }
 
-  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall) {
+  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall, CellScanner cellScanner) {
     CompletableFuture<RESP> future = new CompletableFuture<>();
-    HBaseRpcController controller = conn.rpcControllerFactory.newController();
+    HBaseRpcController controller = conn.rpcControllerFactory.newController(cellScanner);
     try {
       rpcCall.call(conn.getAdminStub(server), controller, new RpcCallback<RESP>() {
 
@@ -115,6 +116,10 @@ public class AsyncRegionServerAdmin {
 return future;
   }
 
+  private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall) {
+    return call(rpcCall, null);
+  }
+
   public CompletableFuture<GetRegionInfoResponse> getRegionInfo(GetRegionInfoRequest request) {
     return call((stub, controller, done) -> stub.getRegionInfo(controller, request, done));
   }
@@ -154,8 +159,9 @@ public class AsyncRegionServerAdmin {
   }
 
   public CompletableFuture<ReplicateWALEntryResponse> replicateWALEntry(
-      ReplicateWALEntryRequest request) {
-    return call((stub, controller, done) -> stub.replicateWALEntry(controller, request, done));
+      ReplicateWALEntryRequest request, CellScanner cellScanner) {
+    return call((stub, controller, done) -> stub.replicateWALEntry(controller, request, done),
+      cellScanner);
   }
 
   public CompletableFuture<ReplicateWALEntryResponse> replay(ReplicateWALEntryRequest request) {
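
The point of the extra parameter is that WAL cells travel beside the protobuf
request in the RPC controller's CellScanner rather than being re-serialized into
the message body. The resulting call shape, schematically (caller names here are
hypothetical):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;

public class ReplicateSketch {
  static CompletableFuture<ReplicateWALEntryResponse> ship(AsyncRegionServerAdmin rsAdmin,
      ReplicateWALEntryRequest request, CellScanner cells) {
    // The cells are attached to the controller inside replicateWALEntry.
    return rsAdmin.replicateWALEntry(request, cells);
  }
}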

http://git-wip-us.apache.org/repos/asf/hbase/blob/c7c002e6/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index c1b3911..74fad26 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -20,51 +20,54 @@ package org.apache.hadoop.hbase.protobuf;
 
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
 import org.apache.hadoop.hbase.io.SizedCellScanner;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.HBaseRpcControllerImpl;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.u

[15/15] hbase git commit: HBASE-21538 Rewrite RegionReplicaFlushHandler to use AsyncClusterConnection

HBASE-21538 Rewrite RegionReplicaFlushHandler to use AsyncClusterConnection


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/76d1f43f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/76d1f43f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/76d1f43f

Branch: refs/heads/HBASE-21512
Commit: 76d1f43f2ff035173b72d4d904d058c17e557e5d
Parents: c7c002e
Author: Duo Zhang 
Authored: Wed Dec 12 09:33:33 2018 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 17:00:36 2019 +0800

--
 .../hbase/client/AsyncClusterConnection.java|   8 ++
 .../hbase/client/AsyncConnectionImpl.java   |   8 ++
 .../hbase/client/ClusterConnectionFactory.java  |  16 +--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  36 --
 .../apache/hadoop/hbase/util/FutureUtils.java   |  22 
 .../master/procedure/RSProcedureDispatcher.java |  34 +-
 .../hbase/protobuf/ReplicationProtbufUtil.java  |  15 +--
 .../hbase/regionserver/HRegionServer.java   |   3 +-
 .../handler/RegionReplicaFlushHandler.java  | 110 ++-
 9 files changed, 132 insertions(+), 120 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/76d1f43f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
index 1327fd7..f1f64ca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -17,10 +17,13 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.concurrent.CompletableFuture;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
+
 /**
  * The asynchronous connection for internal usage.
  */
@@ -41,4 +44,9 @@ public interface AsyncClusterConnection extends AsyncConnection {
* Get the rpc client we used to communicate with other servers.
*/
   RpcClient getRpcClient();
+
+  /**
+   * Flush a region and get the response.
+   */
+  CompletableFuture<FlushRegionResponse> flush(byte[] regionName, boolean writeFlushWALMarker);
 }
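
A hypothetical caller sketch for the new flush hook, reacting the way the rewritten
RegionReplicaFlushHandler does (getFlushed/hasWroteFlushWalMarker come from the
FlushRegionResponse proto):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;

public class FlushSketch {
  static CompletableFuture<Boolean> flushPrimary(AsyncClusterConnection conn,
      byte[] regionName) {
    // Ask the primary to flush; true means "write a flush WAL marker if you
    // cannot flush", which is what lets a replica unblock reads.
    return conn.flush(regionName, true)
        .thenApply(resp -> resp.getFlushed() || resp.hasWroteFlushWalMarker());
  }
}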

http://git-wip-us.apache.org/repos/asf/hbase/blob/76d1f43f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 4e7f421..d883809 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -54,6 +54,7 @@ import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
@@ -335,4 +336,11 @@ class AsyncConnectionImpl implements 
AsyncClusterConnection {
   public AsyncRegionServerAdmin getRegionServerAdmin(ServerName serverName) {
 return new AsyncRegionServerAdmin(serverName, this);
   }
+
+  @Override
+  public CompletableFuture<FlushRegionResponse> flush(byte[] regionName,
+      boolean writeFlushWALMarker) {
+RawAsyncHBaseAdmin admin = (RawAsyncHBaseAdmin) getAdmin();
+return admin.flushRegionInternal(regionName, writeFlushWALMarker);
+  }
 }
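
For orientation, here is a minimal sketch of how internal server code might consume the new flush API shown above; the caller class, the "conn" handle, and the branch comments are illustrative assumptions, not code from this patch:

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;

// Sketch only: assumes a caller that already holds an AsyncClusterConnection.
class FlushCallerSketch {
  void flushRegion(AsyncClusterConnection conn, byte[] regionName) {
    CompletableFuture<FlushRegionResponse> future = conn.flush(regionName, true);
    future.whenComplete((response, error) -> {
      if (error != null) {
        return; // RPC failed; the caller decides whether to retry
      }
      if (response.getFlushed()) {
        // the primary completed a full flush cycle
      } else if (response.hasWroteFlushWalMarker() && response.getWroteFlushWalMarker()) {
        // no flush, but a flush WAL marker was written and will be replayed
      }
    });
  }
}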

http://git-wip-us.apache.org/repos/asf/hbase/blob/76d1f43f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
index 68c0630..79484db 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
@@ -

[13/15] hbase git commit: HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer

HBASE-21515 Also initialize an AsyncClusterConnection in HRegionServer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d752eec2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d752eec2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d752eec2

Branch: refs/heads/HBASE-21512
Commit: d752eec26d9071fc1cc14eaa28473a8171cee93a
Parents: d957f0f
Author: zhangduo 
Authored: Fri Nov 30 08:23:47 2018 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 17:00:35 2019 +0800

--
 .../hbase/client/AsyncClusterConnection.java| 38 
 .../hbase/client/AsyncConnectionImpl.java   | 39 ++--
 .../hbase/client/ClusterConnectionFactory.java  | 63 
 .../hadoop/hbase/client/ConnectionFactory.java  |  5 +-
 .../hadoop/hbase/util/ReflectionUtils.java  | 22 ---
 .../java/org/apache/hadoop/hbase/Server.java| 20 +++
 .../org/apache/hadoop/hbase/master/HMaster.java |  3 +
 .../hbase/regionserver/HRegionServer.java   | 56 -
 .../regionserver/ReplicationSyncUp.java |  6 ++
 .../hadoop/hbase/MockRegionServerServices.java  |  5 ++
 .../client/TestAsyncNonMetaRegionLocator.java   |  2 +-
 ...syncNonMetaRegionLocatorConcurrenyLimit.java |  2 +-
 .../client/TestAsyncRegionLocatorTimeout.java   |  2 +-
 ...TestAsyncSingleRequestRpcRetryingCaller.java |  4 +-
 .../hbase/client/TestAsyncTableNoncedRetry.java |  2 +-
 .../hbase/master/MockNoopMasterServices.java|  6 ++
 .../hadoop/hbase/master/MockRegionServer.java   |  5 ++
 .../hbase/master/TestActiveMasterManager.java   |  6 ++
 .../hbase/master/cleaner/TestHFileCleaner.java  |  6 ++
 .../master/cleaner/TestHFileLinkCleaner.java|  6 ++
 .../hbase/master/cleaner/TestLogsCleaner.java   |  6 ++
 .../cleaner/TestReplicationHFileCleaner.java|  6 ++
 .../regionserver/TestHeapMemoryManager.java |  6 ++
 .../hbase/regionserver/TestSplitLogWorker.java  |  6 ++
 .../hbase/regionserver/TestWALLockup.java   |  6 ++
 .../TestReplicationTrackerZKImpl.java   |  6 ++
 .../TestReplicationSourceManager.java   |  6 ++
 .../security/token/TestTokenAuthentication.java |  6 ++
 .../apache/hadoop/hbase/util/MockServer.java|  6 ++
 29 files changed, 302 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d752eec2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
new file mode 100644
index 000..c7dea25
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The asynchronous connection for internal usage.
+ */
+@InterfaceAudience.Private
+public interface AsyncClusterConnection extends AsyncConnection {
+
+  /**
+   * Get the nonce generator for this connection.
+   */
+  NonceGenerator getNonceGenerator();
+
+  /**
+   * Get the rpc client we used to communicate with other servers.
+   */
+  RpcClient getRpcClient();
+}
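
As a rough sketch of how a server-side component might obtain such a connection; the factory method name and parameter list below are inferred from the ClusterConnectionFactory entry in the diffstat and are assumptions, not quotes from the patch:

import java.io.IOException;
import java.net.SocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.security.User;

// Assumption-heavy sketch: createAsyncClusterConnection(...) is inferred.
class ServerConnectionSetup {
  AsyncClusterConnection setup(Configuration conf, SocketAddress bindAddress, User user)
      throws IOException {
    // built once at server startup and shared by internal components
    return ClusterConnectionFactory.createAsyncClusterConnection(conf, bindAddress, user);
  }
}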

http://git-wip-us.apache.org/repos/asf/hbase/blob/d752eec2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 361d5b2..188e830 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -

[06/15] hbase git commit: HBASE-21361: Disable printing of stack-trace in shell when quotas are not enabled

HBASE-21361: Disable printing of stack-trace in shell when quotas are not enabled

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5aaa7343
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5aaa7343
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5aaa7343

Branch: refs/heads/HBASE-21512
Commit: 5aaa73434e01d723a74a349920c779af27261ddc
Parents: a735a47
Author: Sakthi 
Authored: Fri Dec 21 10:47:16 2018 -0800
Committer: Josh Elser 
Committed: Mon Jan 7 16:39:13 2019 -0500

--
 hbase-shell/src/main/ruby/shell/commands.rb | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5aaa7343/hbase-shell/src/main/ruby/shell/commands.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index be6dd85..4fdc8b5 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -179,6 +179,12 @@ module Shell
   error = regex.match(str)
   raise error[:message].capitalize unless error.nil?
 end
+if cause.is_a?(org.apache.hadoop.hbase.DoNotRetryIOException)
+  regex = /.*UnsupportedOperationException: quota support disabled.*/
+  error = regex.match(cause.message)
+  error_msg = 'Quota Support disabled. Please enable in configuration.'
+  raise error_msg unless error.nil?
+end
 
 # Throw the other exception which hasn't been handled above
 raise cause
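
For readers of the Java client, the same guard can be sketched in Java; the class and method names here are invented for illustration, the shipped fix is only the Ruby hunk above:

import org.apache.hadoop.hbase.DoNotRetryIOException;

// Hypothetical Java rendering of the shell's new quota guard.
final class QuotaErrorTranslator {
  static RuntimeException translate(Throwable cause) {
    if (cause instanceof DoNotRetryIOException && cause.getMessage() != null
        && cause.getMessage().matches(".*UnsupportedOperationException: quota support disabled.*")) {
      return new RuntimeException("Quota Support disabled. Please enable in configuration.");
    }
    return new RuntimeException(cause); // fall through: surface the original cause
  }
}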



[14/15] hbase git commit: HBASE-21516 Use AsyncConnection instead of Connection in SecureBulkLoadManager

HBASE-21516 Use AsyncConnection instead of Connection in SecureBulkLoadManager


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4e8a0b33
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4e8a0b33
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4e8a0b33

Branch: refs/heads/HBASE-21512
Commit: 4e8a0b33519e9749d7d69719b87d5a2590f5d707
Parents: d752eec
Author: zhangduo 
Authored: Sat Dec 1 21:15:48 2018 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 17:00:35 2019 +0800

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java |  5 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  7 ++-
 .../hbase/regionserver/HRegionServer.java   |  2 +-
 .../regionserver/SecureBulkLoadManager.java | 24 +
 .../hadoop/hbase/security/token/TokenUtil.java  | 57 +++-
 .../hbase/security/token/TestTokenUtil.java | 42 +++
 6 files changed, 96 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a0b33/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index a3d49b5..d9e620b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -261,13 +261,12 @@ public final class ProtobufUtil {
    * just {@link ServiceException}. Prefer this method to
    * {@link #getRemoteException(ServiceException)} because trying to
    * contain direct protobuf references.
-   * @param e
    */
-  public static IOException handleRemoteException(Exception e) {
+  public static IOException handleRemoteException(Throwable e) {
     return makeIOExceptionOfException(e);
   }
 
-  private static IOException makeIOExceptionOfException(Exception e) {
+  private static IOException makeIOExceptionOfException(Throwable e) {
     Throwable t = e;
     if (e instanceof ServiceException ||
         e instanceof org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a0b33/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index fea81f1..de2fb7d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -40,7 +40,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ByteBufferExtendedCell;
@@ -123,6 +122,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Service;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat;
 import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -343,13 +343,12 @@ public final class ProtobufUtil {
    * just {@link ServiceException}. Prefer this method to
    * {@link #getRemoteException(ServiceException)} because trying to
    * contain direct protobuf references.
-   * @param e
    */
-  public static IOException handleRemoteException(Exception e) {
+  public static IOException handleRemoteException(Throwable e) {
     return makeIOExceptionOfException(e);
   }
 
-  private static IOException makeIOExceptionOfException(Exception e) {
+  private static IOException makeIOExceptionOfException(Throwable e) {
     Throwable t = e;
     if (e instanceof ServiceException) {
       t = e.getCause();
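
The widening from Exception to Throwable matters for the async rewrite: CompletableFuture callbacks deliver failures as Throwable, so they can now be passed straight through. A sketch of the intended call pattern, with invented names:

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

// Illustrative callback; "future" and the error handling are stand-ins.
class AsyncErrorTranslation {
  void attach(CompletableFuture<Void> future) {
    future.whenComplete((ignored, error) -> {
      if (error != null) {
        // no cast needed now that handleRemoteException accepts Throwable
        IOException ioe = ProtobufUtil.handleRemoteException(error);
        // fail the bulk load / surface ioe to the caller
      }
    });
  }
}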

http://git-wip-us.apache.org/repos/asf/hbase/blob/4e8a0b33/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServ

[04/15] hbase git commit: HBASE-21668 SCM fetch times out for nightlies

HBASE-21668 SCM fetch times out for nightlies

Signed-off-by: Sean Busbey 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/84c1f088
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/84c1f088
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/84c1f088

Branch: refs/heads/HBASE-21512
Commit: 84c1f0887d7b7968e1760f15e3d12a3f80fc87bc
Parents: 281d642
Author: Peter Somogyi 
Authored: Thu Jan 3 14:56:32 2019 +0100
Committer: Peter Somogyi 
Committed: Mon Jan 7 21:56:42 2019 +0100

--
 dev-support/Jenkinsfile | 49 +---
 1 file changed, 42 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/84c1f088/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index b333afb..d36318b 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -58,9 +58,16 @@ pipeline {
   stages {
 stage ('scm-checkout') {
   steps {
-dir('component') {
-  checkout scm
-}
+dir('component') {
+  checkout([
+$class: 'GitSCM',
+branches: scm.branches,
+extensions: scm.extensions + [
+  [$class: 'CleanBeforeCheckout'],
+  [$class: 'CloneOption', honorRefspec: true, noTags: true, reference: '', shallow: true, depth: 30]],
+userRemoteConfigs: scm.userRemoteConfigs
+  ])
+}
   }
 }
 stage ('thirdparty installs') {
@@ -203,7 +210,14 @@ pipeline {
 unstash 'yetus'
// since we have a new node definition we need to re-do the scm checkout
 dir('component') {
-  checkout scm
+  checkout([
+$class: 'GitSCM',
+branches: scm.branches,
+extensions: scm.extensions + [
+  [$class: 'CleanBeforeCheckout'],
+  [$class: 'CloneOption', honorRefspec: true, noTags: true, reference: '', shallow: true, depth: 30]],
+userRemoteConfigs: scm.userRemoteConfigs
+  ])
 }
 sh '''#!/usr/bin/env bash
   set -e
@@ -270,7 +284,14 @@ pipeline {
 '''
 unstash 'yetus'
 dir('component') {
-  checkout scm
+  checkout([
+$class: 'GitSCM',
+branches: scm.branches,
+extensions: scm.extensions + [
+  [$class: 'CleanBeforeCheckout'],
+  [$class: 'CloneOption', honorRefspec: true, noTags: true, reference: '', shallow: true, depth: 30]],
+userRemoteConfigs: scm.userRemoteConfigs
+  ])
 }
 sh '''#!/usr/bin/env bash
   set -e
@@ -350,7 +371,14 @@ pipeline {
 '''
 unstash 'yetus'
 dir('component') {
-  checkout scm
+  checkout([
+$class: 'GitSCM',
+branches: scm.branches,
+extensions: scm.extensions + [
+  [$class: 'CleanBeforeCheckout'],
+  [$class: 'CloneOption', honorRefspec: true, noTags: true, reference: '', shallow: true, depth: 30]],
+userRemoteConfigs: scm.userRemoteConfigs
+  ])
 }
 sh '''#!/usr/bin/env bash
   set -e
@@ -437,7 +465,14 @@ pipeline {
 '''
 unstash 'yetus'
 dir('component') {
-  checkout scm
+  checkout([
+$class: 'GitSCM',
+branches: scm.branches,
+extensions: scm.extensions + [
+  [$class: 'CleanBeforeCheckout'],
+  [$class: 'CloneOption', honorRefspec: true, noTags: true, reference: '', shallow: true, depth: 30]],
+userRemoteConfigs: scm.userRemoteConfigs
+  ])
 }
 sh '''#!/usr/bin/env bash
   set -e



[02/15] hbase git commit: HBASE-21588 Procedure v2 wal splitting implementation

http://git-wip-us.apache.org/repos/asf/hbase/blob/281d6429/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java
new file mode 100644
index 000..5c801c5
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitWALProcedure.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
+import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER;
+
+import java.util.List;
+import java.util.Optional;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.SplitWALManager;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestSplitWALProcedure {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestSplitWALProcedure.class);
+
+  private static HBaseTestingUtility TEST_UTIL;
+  private HMaster master;
+  private TableName TABLE_NAME;
+  private SplitWALManager splitWALManager;
+  private byte[] FAMILY;
+
+  @Before
+  public void setup() throws Exception {
+    TEST_UTIL = new HBaseTestingUtility();
+    TEST_UTIL.getConfiguration().setBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, false);
+    TEST_UTIL.getConfiguration().setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, 1);
+    TEST_UTIL.startMiniCluster(3);
+    master = TEST_UTIL.getHBaseCluster().getMaster();
+    splitWALManager = master.getSplitWALManager();
+    TABLE_NAME = TableName.valueOf(Bytes.toBytes("TestSplitWALProcedure"));
+    FAMILY = Bytes.toBytes("test");
+  }
+
+  @After
+  public void teardown() throws Exception {
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(
+      master.getMasterProcedureExecutor(), false);
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testHandleDeadWorker() throws Exception {
+    Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE);
+    for (int i = 0; i < 10; i++) {
+      TEST_UTIL.loadTable(table, FAMILY);
+    }
+    HRegionServer testServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
+    ProcedureExecutor<MasterProcedureEnv> masterPE = master.getMasterProcedureExecutor();
+    List<FileStatus> wals = splitWALManager.getWALsToSplit(testServer.getServerName(), false);
+    Assert.assertEquals(1, wals.size());
+    TEST_UTIL.getHBaseCluster().killRegionServer(testServer.getServerName());
+    TEST_UTIL.waitFor(3, () -> master.getProcedures().stream()
+        .anyMatch(procedure -> procedure instanceof SplitWALProcedure));
+    Procedure splitWALProcedure = master.getProcedures().stream()
+        .filter(procedure -> procedure instanceof SplitWALProcedure).findAny().get();
+    Assert.assertNotNull(splitWALProcedure);
+    TEST_UTIL.waitFor(5000, () -> ((SplitWALProcedure) splitWALProcedure).getWorker() != null);
+    TEST_UTIL.getHBaseCluster()
+        .killRegionServer(((SplitWALProcedure) splitWALProcedure).
hbase git commit: HBASE-21684 Throw DNRIOE when connection or rpc client is closed

Repository: hbase
Updated Branches:
  refs/heads/master e9dae2adc -> d957f0fa1


HBASE-21684 Throw DNRIOE when connection or rpc client is closed


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d957f0fa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d957f0fa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d957f0fa

Branch: refs/heads/master
Commit: d957f0fa1926c13355c8cca01bbfd7133866e05d
Parents: e9dae2a
Author: zhangduo 
Authored: Mon Jan 7 20:00:19 2019 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 16:40:40 2019 +0800

--
 .../apache/hadoop/hbase/ipc/StoppedRpcClientException.java| 4 ++--
 .../org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java   | 7 ---
 2 files changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d957f0fa/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java
index d50004e..bd1e101 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/StoppedRpcClientException.java
@@ -17,11 +17,11 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Public
-public class StoppedRpcClientException extends HBaseIOException {
+public class StoppedRpcClientException extends DoNotRetryIOException {
   public StoppedRpcClientException() {
 super();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d957f0fa/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
index 09f8984..3b50870 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
@@ -30,6 +30,7 @@ import java.util.concurrent.Delayed;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
@@ -259,7 +260,7 @@ public final class ReadOnlyZKClient implements Closeable {
 
   public CompletableFuture<byte[]> get(String path) {
     if (closed.get()) {
-      return failed(new IOException("Client already closed"));
+      return failed(new DoNotRetryIOException("Client already closed"));
     }
     CompletableFuture<byte[]> future = new CompletableFuture<>();
     tasks.add(new ZKTask<byte[]>(path, future, "get") {
@@ -275,7 +276,7 @@ public final class ReadOnlyZKClient implements Closeable {
 
   public CompletableFuture<Stat> exists(String path) {
     if (closed.get()) {
-      return failed(new IOException("Client already closed"));
+      return failed(new DoNotRetryIOException("Client already closed"));
     }
     CompletableFuture<Stat> future = new CompletableFuture<>();
     tasks.add(new ZKTask<Stat>(path, future, "exists") {
@@ -339,7 +340,7 @@ public final class ReadOnlyZKClient implements Closeable {
       }
     }
     closeZk();
-    IOException error = new IOException("Client already closed");
+    DoNotRetryIOException error = new DoNotRetryIOException("Client already closed");
     Arrays.stream(tasks.toArray(new Task[0])).forEach(t -> t.closed(error));
     tasks.clear();
   }
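
The practical effect is on retry loops: a DoNotRetryIOException (which StoppedRpcClientException now extends) is treated as terminal instead of scheduling another attempt. A minimal sketch of that contract, with an invented retry loop:

import java.io.IOException;
import java.util.concurrent.Callable;

import org.apache.hadoop.hbase.DoNotRetryIOException;

// Sketch of the retry contract; maxAttempts and the loop are stand-ins.
class RetryingCallerSketch<T> {
  T callWithRetries(Callable<T> call, int maxAttempts) throws Exception {
    IOException last = null;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        return call.call();
      } catch (DoNotRetryIOException e) {
        throw e; // connection/rpc client already closed: retrying cannot succeed
      } catch (IOException e) {
        last = e; // retryable; real callers also back off here
      }
    }
    throw last != null ? last : new IOException("no attempts made");
  }
}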



hbase git commit: HBASE-21683 Reset readsEnabled flag after successfully flushing the primary region

Repository: hbase
Updated Branches:
  refs/heads/master 7377fcd29 -> e9dae2adc


HBASE-21683 Reset readsEnabled flag after successfully flushing the primary region


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e9dae2ad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e9dae2ad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e9dae2ad

Branch: refs/heads/master
Commit: e9dae2adc381550eb3e2b65be246309b309e7f06
Parents: 7377fcd
Author: zhangduo 
Authored: Mon Jan 7 20:09:20 2019 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 16:06:34 2019 +0800

--
 .../hbase/regionserver/handler/RegionReplicaFlushHandler.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e9dae2ad/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
index b917379..81b6d7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
@@ -145,6 +145,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
             + " of region " + region.getRegionInfo().getEncodedName()
             + " Now waiting and blocking reads until observing a full flush cycle");
         }
+        region.setReadsEnabled(true);
         break;
       } else {
         if (response.hasWroteFlushWalMarker()) {
@@ -156,6 +157,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
               + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and "
               + "blocking reads until observing a flush marker");
         }
+        region.setReadsEnabled(true);
         break;
       } else {
         // somehow we were not able to get the primary to write the flush request. It may be
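
Condensed, the handler's flow after this fix looks roughly like the sketch below; Region is a stand-in interface and the surrounding retry loop and RPC plumbing are omitted:

import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;

class ReplicaFlushFlowSketch {
  interface Region { void setReadsEnabled(boolean enabled); } // stand-in for HRegion

  /** Returns true when done; false means the caller should sleep and retry. */
  boolean handle(FlushRegionResponse response, Region region) {
    if (response.getFlushed()) {
      region.setReadsEnabled(true); // the fix: reset the flag on a successful flush
      return true;
    }
    if (response.hasWroteFlushWalMarker() && response.getWroteFlushWalMarker()) {
      region.setReadsEnabled(true); // the fix, marker path
      return true;
    }
    return false; // primary may be closing or already flushing; retry later
  }
}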



hbase git commit: HBASE-21683 Reset readsEnabled flag after successfully flushing the primary region

Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 8dcce7c6c -> f86914c31


HBASE-21683 Reset readsEnabled flag after successfully flushing the primary region


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f86914c3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f86914c3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f86914c3

Branch: refs/heads/branch-2.0
Commit: f86914c3103ac2cea1a8829efad81b4f21463689
Parents: 8dcce7c
Author: zhangduo 
Authored: Mon Jan 7 20:09:20 2019 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 15:51:56 2019 +0800

--
 .../hbase/regionserver/handler/RegionReplicaFlushHandler.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f86914c3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
index b917379..81b6d7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
@@ -145,6 +145,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
             + " of region " + region.getRegionInfo().getEncodedName()
             + " Now waiting and blocking reads until observing a full flush cycle");
         }
+        region.setReadsEnabled(true);
         break;
       } else {
         if (response.hasWroteFlushWalMarker()) {
@@ -156,6 +157,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
               + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and "
               + "blocking reads until observing a flush marker");
         }
+        region.setReadsEnabled(true);
         break;
       } else {
         // somehow we were not able to get the primary to write the flush request. It may be



hbase git commit: HBASE-21683 Reset readsEnabled flag after successfully flushing the primary region

Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 3de116af4 -> 1c73b230b


HBASE-21683 Reset readsEnabled flag after successfully flushing the primary region


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c73b230
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c73b230
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c73b230

Branch: refs/heads/branch-2.1
Commit: 1c73b230b8e687f0867bf7cf4084f7f356ed8e3c
Parents: 3de116a
Author: zhangduo 
Authored: Mon Jan 7 20:09:20 2019 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 15:51:52 2019 +0800

--
 .../hbase/regionserver/handler/RegionReplicaFlushHandler.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c73b230/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
index b917379..81b6d7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
@@ -145,6 +145,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
             + " of region " + region.getRegionInfo().getEncodedName()
             + " Now waiting and blocking reads until observing a full flush cycle");
         }
+        region.setReadsEnabled(true);
         break;
       } else {
         if (response.hasWroteFlushWalMarker()) {
@@ -156,6 +157,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
               + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and "
               + "blocking reads until observing a flush marker");
         }
+        region.setReadsEnabled(true);
         break;
       } else {
         // somehow we were not able to get the primary to write the flush request. It may be



hbase git commit: HBASE-21683 Reset readsEnabled flag after successfully flushing the primary region

Repository: hbase
Updated Branches:
  refs/heads/branch-2 d5fff9c25 -> 348c2dfe9


HBASE-21683 Reset readsEnabled flag after successfully flushing the primary region


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/348c2dfe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/348c2dfe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/348c2dfe

Branch: refs/heads/branch-2
Commit: 348c2dfe9b0857e2406081451b4936c54b595a51
Parents: d5fff9c
Author: zhangduo 
Authored: Mon Jan 7 20:09:20 2019 +0800
Committer: Duo Zhang 
Committed: Tue Jan 8 15:51:48 2019 +0800

--
 .../hbase/regionserver/handler/RegionReplicaFlushHandler.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/348c2dfe/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
index b917379..81b6d7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
@@ -145,6 +145,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
             + " of region " + region.getRegionInfo().getEncodedName()
             + " Now waiting and blocking reads until observing a full flush cycle");
         }
+        region.setReadsEnabled(true);
         break;
       } else {
         if (response.hasWroteFlushWalMarker()) {
@@ -156,6 +157,7 @@ public class RegionReplicaFlushHandler extends EventHandler {
               + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and "
               + "blocking reads until observing a flush marker");
         }
+        region.setReadsEnabled(true);
         break;
       } else {
         // somehow we were not able to get the primary to write the flush request. It may be