hbase git commit: HBASE-20581 Fix documentation about schema updates

2018-05-17 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/master dab0e90c5 -> f917f4e7c


HBASE-20581 Fix documentation about schema updates

Signed-off-by: Balazs Meszaros 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f917f4e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f917f4e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f917f4e7

Branch: refs/heads/master
Commit: f917f4e7c8de2987bde42eccd14a901ad3ec9b27
Parents: dab0e90
Author: Josh Elser 
Authored: Mon May 14 13:44:40 2018 -0400
Committer: Josh Elser 
Committed: Thu May 17 14:29:48 2018 -0400

--
 src/main/asciidoc/_chapters/external_apis.adoc | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f917f4e7/src/main/asciidoc/_chapters/external_apis.adoc
--
diff --git a/src/main/asciidoc/_chapters/external_apis.adoc 
b/src/main/asciidoc/_chapters/external_apis.adoc
index ffb6ee6..c1299a6 100644
--- a/src/main/asciidoc/_chapters/external_apis.adoc
+++ b/src/main/asciidoc/_chapters/external_apis.adoc
@@ -186,20 +186,20 @@ creation or mutation, and `DELETE` for deletion.
 
 |/_table_/schema
 |POST
-|Create a new table, or replace an existing table's schema
+|Update an existing table with the provided schema fragment
 |curl -vi -X POST \
   -H "Accept: text/xml" \
   -H "Content-Type: text/xml" \
-  -d '?xml version="1.0" encoding="UTF-8"?>TableSchema 
name="users">ColumnSchema name="cf" />/TableSchema>' \
+  -d '?xml version="1.0" encoding="UTF-8"?>TableSchema 
name="users">ColumnSchema name="cf" KEEP_DELETED_CELLS="true" 
/>/TableSchema>' \
   "http://example.com:8000/users/schema;
 
 |/_table_/schema
 |PUT
-|Update an existing table with the provided schema fragment
+|Create a new table, or replace an existing table's schema
 |curl -vi -X PUT \
   -H "Accept: text/xml" \
   -H "Content-Type: text/xml" \
-  -d '?xml version="1.0" encoding="UTF-8"?>TableSchema 
name="users">ColumnSchema name="cf" KEEP_DELETED_CELLS="true" 
/>/TableSchema>' \
+  -d '?xml version="1.0" encoding="UTF-8"?>TableSchema 
name="users">ColumnSchema name="cf" />/TableSchema>' \
   "http://example.com:8000/users/schema;
 
 |/_table_/schema



[2/2] hbase git commit: HBASE-20582 Upgrade JRuby to 9.1.17.0

2018-05-17 Thread elserj
HBASE-20582 Upgrade JRuby to 9.1.17.0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dcd6b6c3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dcd6b6c3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dcd6b6c3

Branch: refs/heads/branch-2
Commit: dcd6b6c3f27d6afe7378b64f6c3fccca31618f6b
Parents: 56bb1fa
Author: Josh Elser 
Authored: Wed May 16 13:07:21 2018 -0400
Committer: Josh Elser 
Committed: Thu May 17 13:11:37 2018 -0400

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dcd6b6c3/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 7573d2b..67854e6 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1355,7 +1355,7 @@
 2.3.2
 3.0.1-b08
 6.1.14
-9.1.10.0
+9.1.17.0
 4.12
 1.3
 4.2.0-incubating



[1/2] hbase git commit: HBASE-20582 Upgrade JRuby to 9.1.17.0

2018-05-17 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-2 56bb1fa06 -> dcd6b6c3f
  refs/heads/master cf529f18a -> dab0e90c5


HBASE-20582 Upgrade JRuby to 9.1.17.0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dab0e90c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dab0e90c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dab0e90c

Branch: refs/heads/master
Commit: dab0e90c54c6909338638ad8addafda2584bb5bd
Parents: cf529f1
Author: Josh Elser 
Authored: Wed May 16 13:07:21 2018 -0400
Committer: Josh Elser 
Committed: Thu May 17 10:06:06 2018 -0400

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dab0e90c/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 7aede4a..c5d8148 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1467,7 +1467,7 @@
 2.3.2
 3.0.1-b08
 6.1.14
-9.1.10.0
+9.1.17.0
 4.12
 1.3
 4.2.0-incubating



[39/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index cea34ef..4c472b8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -703,20 +703,20 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.regionserver.ScanType
-org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
 org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
 org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.FlushType
 org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
-org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
+org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 3bd22b5..2731576 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -131,8 +131,8 @@
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
-org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
+org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
index f6fc79b..a4ab1b7 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
@@ -238,8 +238,8 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 

[45/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
index 42b61c4..e69ef81 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
@@ -468,19 +468,30 @@
 default void
 MasterObserver.postCompletedModifyTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
   TableNametableName,
-  TableDescriptorhtd)
-Called after to modifying a table's properties.
+  TableDescriptorcurrentDescriptor)
+Deprecated.
+Since 2.1. Will be removed 
in 3.0.
+
 
 
 
 default void
+MasterObserver.postCompletedModifyTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
+  TableNametableName,
+  TableDescriptoroldDescriptor,
+  TableDescriptorcurrentDescriptor)
+Called after to modifying a table's properties.
+
+
+
+default void
 MasterObserver.postCompletedSplitRegionAction(ObserverContextMasterCoprocessorEnvironmentc,
   RegionInforegionInfoA,
   RegionInforegionInfoB)
 Called after the region is split.
 
 
-
+
 default void
 MasterObserver.postCompletedTruncateTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
 TableNametableName)
@@ -488,21 +499,21 @@
  table.
 
 
-
+
 default void
 MasterObserver.postCreateNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
NamespaceDescriptorns)
 Called after the createNamespace operation has been 
requested.
 
 
-
+
 default ReplicationEndpoint
 RegionServerObserver.postCreateReplicationEndPoint(ObserverContextRegionServerCoprocessorEnvironmentctx,
  ReplicationEndpointendpoint)
 This will be called after the replication endpoint is 
instantiated.
 
 
-
+
 default void
 MasterObserver.postCreateTable(ObserverContextMasterCoprocessorEnvironmentctx,
TableDescriptordesc,
@@ -510,7 +521,7 @@
 Called after the createTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postDecommissionRegionServers(ObserverContextMasterCoprocessorEnvironmentctx,
  https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers,
@@ -518,7 +529,7 @@
 Called after decommission region servers.
 
 
-
+
 default void
 RegionObserver.postDelete(ObserverContextRegionCoprocessorEnvironmentc,
   Deletedelete,
@@ -527,56 +538,56 @@
 Called after the client deletes a value.
 
 
-
+
 default void
 MasterObserver.postDeleteNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnamespace)
 Called after the deleteNamespace operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postDeleteSnapshot(ObserverContextMasterCoprocessorEnvironmentctx,
   SnapshotDescriptionsnapshot)
 Called after the delete snapshot operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postDeleteTable(ObserverContextMasterCoprocessorEnvironmentctx,
TableNametableName)
 Called after the deleteTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postDisableReplicationPeer(ObserverContextMasterCoprocessorEnvironmentctx,
   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
 Called after disable a replication peer
 
 
-
+
 default void
 MasterObserver.postDisableTable(ObserverContextMasterCoprocessorEnvironmentctx,
 TableNametableName)
 Called after the disableTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postEnableReplicationPeer(ObserverContextMasterCoprocessorEnvironmentctx,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
 Called after enable a replication peer
 
 
-
+
 default void
 MasterObserver.postEnableTable(ObserverContextMasterCoprocessorEnvironmentctx,
TableNametableName)
 Called after the enableTable operation has been 
requested.
 
 
-
+
 default void
 EndpointObserver.postEndpointInvocation(ObserverContextRegionCoprocessorEnvironmentctx,
   com.google.protobuf.Serviceservice,
@@ -586,13 +597,13 

[46/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
index ff8cf8c..fcb8070 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
@@ -237,19 +237,30 @@
 default void
 MasterObserver.postCompletedModifyTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
   TableNametableName,
-  TableDescriptorhtd)
-Called after to modifying a table's properties.
+  TableDescriptorcurrentDescriptor)
+Deprecated.
+Since 2.1. Will be removed 
in 3.0.
+
 
 
 
 default void
+MasterObserver.postCompletedModifyTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
+  TableNametableName,
+  TableDescriptoroldDescriptor,
+  TableDescriptorcurrentDescriptor)
+Called after to modifying a table's properties.
+
+
+
+default void
 MasterObserver.postCompletedSplitRegionAction(ObserverContextMasterCoprocessorEnvironmentc,
   RegionInforegionInfoA,
   RegionInforegionInfoB)
 Called after the region is split.
 
 
-
+
 default void
 MasterObserver.postCompletedTruncateTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
 TableNametableName)
@@ -257,14 +268,14 @@
  table.
 
 
-
+
 default void
 MasterObserver.postCreateNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
NamespaceDescriptorns)
 Called after the createNamespace operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postCreateTable(ObserverContextMasterCoprocessorEnvironmentctx,
TableDescriptordesc,
@@ -272,7 +283,7 @@
 Called after the createTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postDecommissionRegionServers(ObserverContextMasterCoprocessorEnvironmentctx,
  https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers,
@@ -280,89 +291,89 @@
 Called after decommission region servers.
 
 
-
+
 default void
 MasterObserver.postDeleteNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnamespace)
 Called after the deleteNamespace operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postDeleteSnapshot(ObserverContextMasterCoprocessorEnvironmentctx,
   SnapshotDescriptionsnapshot)
 Called after the delete snapshot operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postDeleteTable(ObserverContextMasterCoprocessorEnvironmentctx,
TableNametableName)
 Called after the deleteTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postDisableReplicationPeer(ObserverContextMasterCoprocessorEnvironmentctx,
   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
 Called after disable a replication peer
 
 
-
+
 default void
 MasterObserver.postDisableTable(ObserverContextMasterCoprocessorEnvironmentctx,
 TableNametableName)
 Called after the disableTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postEnableReplicationPeer(ObserverContextMasterCoprocessorEnvironmentctx,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId)
 Called after enable a replication peer
 
 
-
+
 default void
 MasterObserver.postEnableTable(ObserverContextMasterCoprocessorEnvironmentctx,
TableNametableName)
 Called after the enableTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postGetClusterMetrics(ObserverContextMasterCoprocessorEnvironmentctx,
  ClusterMetricsstatus)
 Called after get cluster status.
 
 
-
+
 default void
 MasterObserver.postGetLocks(ObserverContextMasterCoprocessorEnvironmentctx)
 Called after a getLocks request has been processed.
 
 
-
+
 default void
 MasterObserver.postGetNamespaceDescriptor(ObserverContextMasterCoprocessorEnvironmentctx,
   NamespaceDescriptorns)
 Called after a getNamespaceDescriptor request has been 
processed.
 
 
-
+

[23/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
index 8b2674f..274eb54 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
@@ -262,1413 +262,1417 @@
 254});
 255  }
 256
-257  public void preModifyNamespace(final 
NamespaceDescriptor ns) throws IOException {
-258
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-259  @Override
-260  public void call(MasterObserver 
observer) throws IOException {
-261observer.preModifyNamespace(this, 
ns);
-262  }
-263});
-264  }
-265
-266  public void postModifyNamespace(final 
NamespaceDescriptor ns) throws IOException {
-267
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-268  @Override
-269  public void call(MasterObserver 
observer) throws IOException {
-270
observer.postModifyNamespace(this, ns);
-271  }
-272});
-273  }
-274
-275  public void 
preGetNamespaceDescriptor(final String namespaceName)
-276  throws IOException {
-277
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-278  @Override
-279  public void call(MasterObserver 
observer) throws IOException {
-280
observer.preGetNamespaceDescriptor(this, namespaceName);
-281  }
-282});
-283  }
-284
-285  public void 
postGetNamespaceDescriptor(final NamespaceDescriptor ns)
-286  throws IOException {
-287
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-288  @Override
-289  public void call(MasterObserver 
observer) throws IOException {
-290
observer.postGetNamespaceDescriptor(this, ns);
-291  }
-292});
-293  }
-294
-295  public void 
preListNamespaceDescriptors(final ListNamespaceDescriptor 
descriptors)
-296  throws IOException {
-297
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-298  @Override
-299  public void call(MasterObserver 
observer) throws IOException {
-300
observer.preListNamespaceDescriptors(this, descriptors);
-301  }
-302});
-303  }
-304
-305  public void 
postListNamespaceDescriptors(final ListNamespaceDescriptor 
descriptors)
-306  throws IOException {
-307
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-308  @Override
-309  public void call(MasterObserver 
observer) throws IOException {
-310
observer.postListNamespaceDescriptors(this, descriptors);
-311  }
-312});
-313  }
-314
-315  /* Implementation of hooks for invoking 
MasterObservers */
+257  public void preModifyNamespace(final 
NamespaceDescriptor currentNsDescriptor,
+258final NamespaceDescriptor 
newNsDescriptor) throws IOException {
+259
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+260  @Override
+261  public void call(MasterObserver 
observer) throws IOException {
+262observer.preModifyNamespace(this, 
currentNsDescriptor, newNsDescriptor);
+263  }
+264});
+265  }
+266
+267  public void postModifyNamespace(final 
NamespaceDescriptor oldNsDescriptor,
+268final NamespaceDescriptor 
currentNsDescriptor) throws IOException {
+269
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+270  @Override
+271  public void call(MasterObserver 
observer) throws IOException {
+272
observer.postModifyNamespace(this, oldNsDescriptor, currentNsDescriptor);
+273  }
+274});
+275  }
+276
+277  public void 
preGetNamespaceDescriptor(final String namespaceName)
+278  throws IOException {
+279
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+280  @Override
+281  public void call(MasterObserver 
observer) throws IOException {
+282
observer.preGetNamespaceDescriptor(this, namespaceName);
+283  }
+284});
+285  }
+286
+287  public void 
postGetNamespaceDescriptor(final NamespaceDescriptor ns)
+288  throws IOException {
+289
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+290  @Override
+291  public void call(MasterObserver 
observer) throws IOException {
+292
observer.postGetNamespaceDescriptor(this, ns);
+293  }
+294});
+295  }
+296
+297  public void 
preListNamespaceDescriptors(final ListNamespaceDescriptor 
descriptors)
+298  throws IOException {
+299
execOperation(coprocEnvironments.isEmpty() ? null : new 

[30/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index a97dfdc..2b1b6c6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -2370,1287 +2370,1292 @@
 2362  }
 2363
 2364  @Override
-2365  public long modifyTable(final 
TableName tableName, final TableDescriptor descriptor,
+2365  public long modifyTable(final 
TableName tableName, final TableDescriptor newDescriptor,
 2366  final long nonceGroup, final long 
nonce) throws IOException {
 2367checkInitialized();
-2368
sanityCheckTableDescriptor(descriptor);
+2368
sanityCheckTableDescriptor(newDescriptor);
 2369
 2370return 
MasterProcedureUtil.submitProcedure(
 2371new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
 2372  @Override
 2373  protected void run() throws 
IOException {
-2374
getMaster().getMasterCoprocessorHost().preModifyTable(tableName, descriptor);
-2375
-2376
LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
+2374TableDescriptor oldDescriptor = 
getMaster().getTableDescriptors().get(tableName);
+2375
getMaster().getMasterCoprocessorHost()
+2376  .preModifyTable(tableName, 
oldDescriptor, newDescriptor);
 2377
-2378// Execute the operation 
synchronously - wait for the operation completes before continuing.
-2379//
-2380// We need to wait for the 
procedure to potentially fail due to "prepare" sanity
-2381// checks. This will block only 
the beginning of the procedure. See HBASE-19953.
-2382ProcedurePrepareLatch latch = 
ProcedurePrepareLatch.createBlockingLatch();
-2383submitProcedure(new 
ModifyTableProcedure(procedureExecutor.getEnvironment(),
-2384descriptor, latch));
-2385latch.await();
-2386
-2387
getMaster().getMasterCoprocessorHost().postModifyTable(tableName, 
descriptor);
-2388  }
-2389
-2390  @Override
-2391  protected String getDescription() 
{
-2392return "ModifyTableProcedure";
-2393  }
-2394});
-2395  }
-2396
-2397  public long restoreSnapshot(final 
SnapshotDescription snapshotDesc,
-2398  final long nonceGroup, final long 
nonce, final boolean restoreAcl) throws IOException {
-2399checkInitialized();
-2400
getSnapshotManager().checkSnapshotSupport();
-2401
-2402// Ensure namespace exists. Will 
throw exception if non-known NS.
-2403final TableName dstTable = 
TableName.valueOf(snapshotDesc.getTable());
-2404
getClusterSchema().getNamespace(dstTable.getNamespaceAsString());
-2405
-2406return 
MasterProcedureUtil.submitProcedure(
-2407new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-2408  @Override
-2409  protected void run() throws 
IOException {
-2410  setProcId(
-2411
getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), 
restoreAcl));
-2412  }
-2413
-2414  @Override
-2415  protected String getDescription() 
{
-2416return 
"RestoreSnapshotProcedure";
-2417  }
-2418});
-2419  }
-2420
-2421  private void checkTableExists(final 
TableName tableName)
-2422  throws IOException, 
TableNotFoundException {
-2423if 
(!MetaTableAccessor.tableExists(getConnection(), tableName)) {
-2424  throw new 
TableNotFoundException(tableName);
-2425}
-2426  }
-2427
-2428  @Override
-2429  public void checkTableModifiable(final 
TableName tableName)
-2430  throws IOException, 
TableNotFoundException, TableNotDisabledException {
-2431if (isCatalogTable(tableName)) {
-2432  throw new IOException("Can't 
modify catalog tables");
-2433}
-2434checkTableExists(tableName);
-2435TableState ts = 
getTableStateManager().getTableState(tableName);
-2436if (!ts.isDisabled()) {
-2437  throw new 
TableNotDisabledException("Not DISABLED; " + ts);
-2438}
-2439  }
-2440
-2441  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
-2442return 
getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
-2443  }
-2444
-2445  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor(EnumSetOption options)
-2446  throws InterruptedIOException {
-2447ClusterMetricsBuilder builder = 
ClusterMetricsBuilder.newBuilder();
-2448// given that hbase1 can't submit 
the request with Option,
-2449// we return all information to 
client if the list of Option is empty.
-2450if (options.isEmpty()) {
-2451  options = 

[32/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
index 5404ea1..1812a55 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
@@ -35,901 +35,908 @@
 027import java.net.InetSocketAddress;
 028import java.net.URLDecoder;
 029import java.net.URLEncoder;
-030import java.util.ArrayList;
-031import java.util.Arrays;
-032import java.util.List;
-033import java.util.Map;
-034import java.util.Set;
-035import java.util.TreeMap;
-036import java.util.TreeSet;
-037import java.util.UUID;
-038import java.util.function.Function;
-039import java.util.stream.Collectors;
-040
-041import 
org.apache.commons.lang3.StringUtils;
-042import 
org.apache.hadoop.conf.Configuration;
-043import org.apache.hadoop.fs.FileSystem;
-044import org.apache.hadoop.fs.Path;
-045import org.apache.hadoop.hbase.Cell;
-046import 
org.apache.hadoop.hbase.CellComparator;
-047import 
org.apache.hadoop.hbase.CellUtil;
-048import 
org.apache.hadoop.hbase.HConstants;
-049import 
org.apache.hadoop.hbase.HRegionLocation;
-050import 
org.apache.hadoop.hbase.HTableDescriptor;
-051import 
org.apache.hadoop.hbase.KeyValue;
-052import 
org.apache.hadoop.hbase.PrivateCellUtil;
-053import 
org.apache.hadoop.hbase.TableName;
-054import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-055import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-056import 
org.apache.hadoop.hbase.client.Connection;
-057import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-058import 
org.apache.hadoop.hbase.client.Put;
-059import 
org.apache.hadoop.hbase.client.RegionLocator;
-060import 
org.apache.hadoop.hbase.client.Table;
-061import 
org.apache.hadoop.hbase.client.TableDescriptor;
-062import 
org.apache.hadoop.hbase.fs.HFileSystem;
-063import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-064import 
org.apache.hadoop.hbase.io.compress.Compression;
-065import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-066import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-068import 
org.apache.hadoop.hbase.io.hfile.HFile;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-071import 
org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
-072import 
org.apache.hadoop.hbase.regionserver.BloomType;
-073import 
org.apache.hadoop.hbase.regionserver.HStore;
-074import 
org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-075import 
org.apache.hadoop.hbase.util.Bytes;
-076import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-077import 
org.apache.hadoop.hbase.util.FSUtils;
-078import 
org.apache.hadoop.hbase.util.MapReduceExtendedCell;
-079import 
org.apache.hadoop.io.NullWritable;
-080import 
org.apache.hadoop.io.SequenceFile;
-081import org.apache.hadoop.io.Text;
-082import org.apache.hadoop.mapreduce.Job;
-083import 
org.apache.hadoop.mapreduce.OutputCommitter;
-084import 
org.apache.hadoop.mapreduce.OutputFormat;
-085import 
org.apache.hadoop.mapreduce.RecordWriter;
-086import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
-087import 
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-088import 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-089import 
org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
-090import 
org.apache.yetus.audience.InterfaceAudience;
-091import org.slf4j.Logger;
-092import org.slf4j.LoggerFactory;
-093
-094import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-095
-096/**
-097 * Writes HFiles. Passed Cells must 
arrive in order.
-098 * Writes current time as the sequence id 
for the file. Sets the major compacted
-099 * attribute on created @{link {@link 
HFile}s. Calling write(null,null) will forcibly roll
-100 * all HFiles being written.
-101 * p
-102 * Using this class as part of a 
MapReduce job is best done
-103 * using {@link 
#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}.
-104 */
-105@InterfaceAudience.Public
-106public class HFileOutputFormat2
-107extends 
FileOutputFormatImmutableBytesWritable, Cell {
-108  private static final Logger LOG = 
LoggerFactory.getLogger(HFileOutputFormat2.class);
-109  static class TableInfo {
-110private TableDescriptor 
tableDesctiptor;
-111private RegionLocator 
regionLocator;
-112
-113public TableInfo(TableDescriptor 
tableDesctiptor, RegionLocator regionLocator) {
-114  this.tableDesctiptor = 
tableDesctiptor;
-115 

[09/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent 

[22/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
index 25c940a..59f97b4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
@@ -428,25 +428,27 @@
 420if (cpHost != null) {
 421  switch (state) {
 422case 
MODIFY_TABLE_PRE_OPERATION:
-423  
cpHost.preModifyTableAction(getTableName(), modifiedTableDescriptor, 
getUser());
-424  break;
-425case 
MODIFY_TABLE_POST_OPERATION:
-426  
cpHost.postCompletedModifyTableAction(getTableName(), 
modifiedTableDescriptor,getUser());
-427  break;
-428default:
-429  throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
-430  }
-431}
-432  }
-433
-434  private ListRegionInfo 
getRegionInfoList(final MasterProcedureEnv env) throws IOException {
-435if (regionInfoList == null) {
-436  regionInfoList = 
env.getAssignmentManager().getRegionStates()
-437  
.getRegionsOfTable(getTableName());
-438}
-439return regionInfoList;
-440  }
-441}
+423  
cpHost.preModifyTableAction(getTableName(), unmodifiedTableDescriptor,
+424modifiedTableDescriptor, 
getUser());
+425  break;
+426case 
MODIFY_TABLE_POST_OPERATION:
+427  
cpHost.postCompletedModifyTableAction(getTableName(), 
unmodifiedTableDescriptor,
+428
modifiedTableDescriptor,getUser());
+429  break;
+430default:
+431  throw new 
UnsupportedOperationException(this + " unhandled state=" + state);
+432  }
+433}
+434  }
+435
+436  private ListRegionInfo 
getRegionInfoList(final MasterProcedureEnv env) throws IOException {
+437if (regionInfoList == null) {
+438  regionInfoList = 
env.getAssignmentManager().getRegionStates()
+439  
.getRegionsOfTable(getTableName());
+440}
+441return regionInfoList;
+442  }
+443}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/util/JSONBean.Writer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/JSONBean.Writer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/JSONBean.Writer.html
index 52440ca..1a2cb3b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/JSONBean.Writer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/JSONBean.Writer.html
@@ -318,48 +318,52 @@
 310jg.writeEndArray();
 311  } else if(value instanceof Number) 
{
 312Number n = (Number)value;
-313jg.writeNumber(n.toString());
-314  } else if(value instanceof Boolean) 
{
-315Boolean b = (Boolean)value;
-316jg.writeBoolean(b);
-317  } else if(value instanceof 
CompositeData) {
-318CompositeData cds = 
(CompositeData)value;
-319CompositeType comp = 
cds.getCompositeType();
-320SetString keys = 
comp.keySet();
-321jg.writeStartObject();
-322for (String key: keys) {
-323  writeAttribute(jg, key, null, 
cds.get(key));
-324}
-325jg.writeEndObject();
-326  } else if(value instanceof 
TabularData) {
-327TabularData tds = 
(TabularData)value;
-328jg.writeStartArray();
-329for(Object entry : tds.values()) 
{
-330  writeObject(jg, description, 
entry);
-331}
-332jg.writeEndArray();
-333  } else {
-334
jg.writeString(value.toString());
-335  }
-336}
-337  }
-338
-339  /**
-340   * Dump out all registered mbeans as 
json on System.out.
-341   * @throws IOException
-342   * @throws 
MalformedObjectNameException
-343   */
-344  public static void dumpAllBeans() 
throws IOException, MalformedObjectNameException {
-345try (PrintWriter writer = new 
PrintWriter(
-346new 
OutputStreamWriter(System.out, StandardCharsets.UTF_8))) {
-347  JSONBean dumper = new JSONBean();
-348  try (JSONBean.Writer jsonBeanWriter 
= dumper.open(writer)) {
-349MBeanServer mbeanServer = 
ManagementFactory.getPlatformMBeanServer();
-350jsonBeanWriter.write(mbeanServer, 
new ObjectName("*:*"), null, false);
-351  }
-352}
-353  }
-354}
+313if 
(Double.isFinite(n.doubleValue())) {
+314  jg.writeNumber(n.toString());
+315} else {
+316  jg.writeString(n.toString());
+317}
+318  } else if(value instanceof Boolean) 
{
+319Boolean b = (Boolean)value;
+320jg.writeBoolean(b);
+321  } 

[15/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html
index 93820c0..d375e8c 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html
@@ -230,7 +230,7 @@ implements 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.had
 
 
 Methods inherited from 
interfaceorg.apache.hadoop.hbase.coprocessor.MasterObserver
-postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, 
postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, 
postCloneSnapshot, postCompletedCreateTableAction, 
postCompletedDeleteTableAction, postCompletedDisableTableAction, 
postCompletedEnableTableAction, postCompletedMergeRegionsAction, 
postCompletedModifyTableAction, postCompletedSplitRegionAction, 
postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, 
postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, 
postDeleteTable, postDisableReplicationPeer, postDisableTable, 
postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, 
postGetLocks, postGetNamespaceDescriptor, postGetProcedures, 
postGetReplicationPeerConfig, postGetTableDescriptors, postGetTableNames, 
postListDecommissionedRegionServers, postListNamespaceDescriptors, 
postListReplicationPeers, postListSnapshot, postLockHeartbeat, 
postMergeRegions, postMergeRegionsCommitAction, postModifyNam
 espace, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, 
postMoveTables, postRecommissionRegionServer, postRegionOffline, 
postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, 
postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, 
postRollBackSplitRegionAction, postSetNamespaceQuota, 
postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, 
postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, 
postTableFlush, postTruncateTable, postUnassign, 
postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, 
preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, 
preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTable, 
preCreateTableAction, preDecommissionRegionServers, preDeleteNamespace, 
preDeleteSnapshot, preDeleteTable, preDeleteTableAction, 
preDisableReplicationPeer, preDisableTable, preDisableTableAction, 
preEnableReplicationPeer, preEnableTable, preEnableTableAction,
  preGetClusterMetrics, preGetLocks, preGetNamespaceDescriptor, 
preGetProcedures, preGetReplicationPeerConfig, preGetTableDescriptors, 
preGetTableNames, preListDecommissionedRegionServers, 
preListNamespaceDescriptors, preListReplicationPeers, preListSnapshot, 
preLockHeartbeat, preMasterInitialization, preMergeRegions, 
preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, 
preModifyTable, preModifyTableAction, preMove, preMoveServers, 
preMoveServersAndTables, preMoveTables, preRecommissionRegionServer, 
preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, 
preRequestLock, preRestoreSnapshot, preSetNamespaceQuota, 
preSetSplitOrMergeEnabled, preSetTableQuota, preSetUserQuota, preSetUserQuota, 
preSetUserQuota, preShutdown, preSnapshot, preSplitRegion, 
preSplitRegionAction, preSplitRegionAfterMETAAction, preStopMaster, 
preTableFlush, preTruncateTable, preTruncateTableAction, preUnassign, 
preUpdateReplicationPeerConfig
+postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, 
postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, 
postCloneSnapshot, postCompletedCreateTableAction, 
postCompletedDeleteTableAction, postCompletedDisableTableAction, 
postCompletedEnableTableAction, postCompletedMergeRegionsAction, 
postCompletedModifyTableAction, postCompletedModifyTableAction, 
postCompletedSplitRegionAction, postCompletedTruncateTableAction, 
postCreateNamespace, postCreateTable, postDecommissionRegionServers, 
postDeleteNamespace, postDeleteSnapshot, postDeleteTable, 
postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, 
postEnableTable, postGetClusterMetrics, postGetLocks, 
postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, 
postGetTableDescriptors, postGetTableNames, 
postListDecommissionedRegionServers, postListNamespaceDescriptors, 
postListReplicationPeers, postListSnapshot, postLockHeartbeat, 
postMergeRegions, postMergeRe
 gionsCommitAction, 

[29/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index a97dfdc..2b1b6c6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -2370,1287 +2370,1292 @@
 2362  }
 2363
 2364  @Override
-2365  public long modifyTable(final 
TableName tableName, final TableDescriptor descriptor,
+2365  public long modifyTable(final 
TableName tableName, final TableDescriptor newDescriptor,
 2366  final long nonceGroup, final long 
nonce) throws IOException {
 2367checkInitialized();
-2368
sanityCheckTableDescriptor(descriptor);
+2368
sanityCheckTableDescriptor(newDescriptor);
 2369
 2370return 
MasterProcedureUtil.submitProcedure(
 2371new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
 2372  @Override
 2373  protected void run() throws 
IOException {
-2374
getMaster().getMasterCoprocessorHost().preModifyTable(tableName, descriptor);
-2375
-2376
LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
+2374TableDescriptor oldDescriptor = 
getMaster().getTableDescriptors().get(tableName);
+2375
getMaster().getMasterCoprocessorHost()
+2376  .preModifyTable(tableName, 
oldDescriptor, newDescriptor);
 2377
-2378// Execute the operation 
synchronously - wait for the operation completes before continuing.
-2379//
-2380// We need to wait for the 
procedure to potentially fail due to "prepare" sanity
-2381// checks. This will block only 
the beginning of the procedure. See HBASE-19953.
-2382ProcedurePrepareLatch latch = 
ProcedurePrepareLatch.createBlockingLatch();
-2383submitProcedure(new 
ModifyTableProcedure(procedureExecutor.getEnvironment(),
-2384descriptor, latch));
-2385latch.await();
-2386
-2387
getMaster().getMasterCoprocessorHost().postModifyTable(tableName, 
descriptor);
-2388  }
-2389
-2390  @Override
-2391  protected String getDescription() 
{
-2392return "ModifyTableProcedure";
-2393  }
-2394});
-2395  }
-2396
-2397  public long restoreSnapshot(final 
SnapshotDescription snapshotDesc,
-2398  final long nonceGroup, final long 
nonce, final boolean restoreAcl) throws IOException {
-2399checkInitialized();
-2400
getSnapshotManager().checkSnapshotSupport();
-2401
-2402// Ensure namespace exists. Will 
throw exception if non-known NS.
-2403final TableName dstTable = 
TableName.valueOf(snapshotDesc.getTable());
-2404
getClusterSchema().getNamespace(dstTable.getNamespaceAsString());
-2405
-2406return 
MasterProcedureUtil.submitProcedure(
-2407new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-2408  @Override
-2409  protected void run() throws 
IOException {
-2410  setProcId(
-2411
getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), 
restoreAcl));
-2412  }
-2413
-2414  @Override
-2415  protected String getDescription() 
{
-2416return 
"RestoreSnapshotProcedure";
-2417  }
-2418});
-2419  }
-2420
-2421  private void checkTableExists(final 
TableName tableName)
-2422  throws IOException, 
TableNotFoundException {
-2423if 
(!MetaTableAccessor.tableExists(getConnection(), tableName)) {
-2424  throw new 
TableNotFoundException(tableName);
-2425}
-2426  }
-2427
-2428  @Override
-2429  public void checkTableModifiable(final 
TableName tableName)
-2430  throws IOException, 
TableNotFoundException, TableNotDisabledException {
-2431if (isCatalogTable(tableName)) {
-2432  throw new IOException("Can't 
modify catalog tables");
-2433}
-2434checkTableExists(tableName);
-2435TableState ts = 
getTableStateManager().getTableState(tableName);
-2436if (!ts.isDisabled()) {
-2437  throw new 
TableNotDisabledException("Not DISABLED; " + ts);
-2438}
-2439  }
-2440
-2441  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
-2442return 
getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
-2443  }
-2444
-2445  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor(EnumSetOption options)
-2446  throws InterruptedIOException {
-2447ClusterMetricsBuilder builder = 
ClusterMetricsBuilder.newBuilder();
-2448// given that hbase1 can't submit 
the request with Option,
-2449// we return all information to 
client if the list of Option is empty.
-2450if (options.isEmpty()) {
-2451  

[47/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
index 20d8c57..dea6213 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":18,"i1":18,"i2":18,"i3":18,"i4":18,"i5":18,"i6":18,"i7":18,"i8":18,"i9":18,"i10":18,"i11":18,"i12":18,"i13":18,"i14":18,"i15":18,"i16":18,"i17":18,"i18":18,"i19":18,"i20":18,"i21":18,"i22":18,"i23":18,"i24":18,"i25":18,"i26":18,"i27":18,"i28":18,"i29":18,"i30":18,"i31":18,"i32":18,"i33":18,"i34":18,"i35":18,"i36":18,"i37":18,"i38":18,"i39":18,"i40":18,"i41":18,"i42":18,"i43":18,"i44":18,"i45":18,"i46":18,"i47":18,"i48":18,"i49":18,"i50":18,"i51":18,"i52":18,"i53":18,"i54":18,"i55":18,"i56":18,"i57":18,"i58":18,"i59":18,"i60":18,"i61":18,"i62":18,"i63":18,"i64":18,"i65":18,"i66":18,"i67":18,"i68":18,"i69":18,"i70":18,"i71":18,"i72":18,"i73":18,"i74":18,"i75":18,"i76":18,"i77":18,"i78":18,"i79":18,"i80":18,"i81":18,"i82":18,"i83":18,"i84":18,"i85":18,"i86":18,"i87":18,"i88":18,"i89":18,"i90":18,"i91":18,"i92":18,"i93":18,"i94":18,"i95":18,"i96":18,"i97":18,"i98":18,"i99":18,"i100":18,"i101":18,"i102":18,"i103":18,"i104":18,"i105":18,"i106":18,"i107":18,"i108":18,"i
 
109":18,"i110":18,"i111":18,"i112":18,"i113":18,"i114":18,"i115":18,"i116":18,"i117":18,"i118":18,"i119":18,"i120":18,"i121":18,"i122":18,"i123":18,"i124":18,"i125":18,"i126":18,"i127":18,"i128":18,"i129":18,"i130":18,"i131":18,"i132":18,"i133":18,"i134":18,"i135":18,"i136":18,"i137":18,"i138":18};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],16:["t5","Default Methods"]};
+var methods = 
{"i0":18,"i1":18,"i2":18,"i3":18,"i4":18,"i5":18,"i6":18,"i7":18,"i8":18,"i9":18,"i10":18,"i11":18,"i12":18,"i13":18,"i14":50,"i15":18,"i16":18,"i17":18,"i18":18,"i19":18,"i20":18,"i21":18,"i22":18,"i23":18,"i24":18,"i25":18,"i26":18,"i27":18,"i28":18,"i29":18,"i30":18,"i31":18,"i32":18,"i33":18,"i34":18,"i35":18,"i36":18,"i37":18,"i38":18,"i39":18,"i40":18,"i41":18,"i42":50,"i43":18,"i44":50,"i45":18,"i46":18,"i47":18,"i48":18,"i49":18,"i50":18,"i51":18,"i52":18,"i53":18,"i54":18,"i55":18,"i56":18,"i57":18,"i58":18,"i59":18,"i60":18,"i61":18,"i62":18,"i63":18,"i64":18,"i65":18,"i66":18,"i67":18,"i68":18,"i69":18,"i70":18,"i71":18,"i72":18,"i73":18,"i74":18,"i75":18,"i76":18,"i77":18,"i78":18,"i79":18,"i80":18,"i81":18,"i82":18,"i83":18,"i84":18,"i85":18,"i86":18,"i87":18,"i88":18,"i89":18,"i90":18,"i91":18,"i92":18,"i93":18,"i94":18,"i95":18,"i96":18,"i97":18,"i98":18,"i99":18,"i100":18,"i101":18,"i102":18,"i103":18,"i104":18,"i105":18,"i106":18,"i107":18,"i108":18,"i
 
109":18,"i110":50,"i111":18,"i112":50,"i113":18,"i114":50,"i115":18,"i116":18,"i117":18,"i118":18,"i119":18,"i120":18,"i121":18,"i122":18,"i123":18,"i124":18,"i125":18,"i126":18,"i127":18,"i128":18,"i129":18,"i130":18,"i131":18,"i132":18,"i133":18,"i134":18,"i135":18,"i136":18,"i137":18,"i138":18,"i139":18,"i140":18,"i141":18,"i142":18,"i143":18,"i144":18};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -145,7 +145,7 @@ public interface 
-All MethodsInstance MethodsDefault Methods
+All MethodsInstance MethodsDefault MethodsDeprecated Methods
 
 Modifier and Type
 Method and Description
@@ -259,19 +259,30 @@ public interface default void
 postCompletedModifyTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
   TableNametableName,
-  TableDescriptorhtd)
-Called after to modifying a table's properties.
+  TableDescriptorcurrentDescriptor)
+Deprecated.
+Since 2.1. Will be removed 
in 3.0.
+
 
 
 
 default void
+postCompletedModifyTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
+  TableNametableName,
+  TableDescriptoroldDescriptor,
+  TableDescriptorcurrentDescriptor)
+Called after to modifying a table's properties.
+
+
+
+default void
 postCompletedSplitRegionAction(ObserverContextMasterCoprocessorEnvironmentc,
   RegionInforegionInfoA,
   RegionInforegionInfoB)
 Called after the region is split.
 
 
-
+
 default void
 postCompletedTruncateTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
 TableNametableName)
@@ -279,14 +290,14 @@ public interface 
+
 default void
 postCreateNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
  

[20/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
index df30a00..3d2b4ec 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class PerformanceEvaluation.ScanTest
+static class PerformanceEvaluation.ScanTest
 extends PerformanceEvaluation.TableTest
 
 
@@ -264,7 +264,7 @@ extends 
 
 testScanner
-privateorg.apache.hadoop.hbase.client.ResultScanner testScanner
+privateorg.apache.hadoop.hbase.client.ResultScanner testScanner
 
 
 
@@ -281,7 +281,7 @@ extends 
 
 ScanTest
-ScanTest(org.apache.hadoop.hbase.client.Connectioncon,
+ScanTest(org.apache.hadoop.hbase.client.Connectioncon,
  PerformanceEvaluation.TestOptionsoptions,
  PerformanceEvaluation.Statusstatus)
 
@@ -300,7 +300,7 @@ extends 
 
 testTakedown
-voidtestTakedown()
+voidtestTakedown()
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Overrides:
@@ -316,7 +316,7 @@ extends 
 
 testRow
-voidtestRow(inti)
+voidtestRow(inti)
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
index fe182c6..d72b414 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class PerformanceEvaluation.SequentialReadTest
+static class PerformanceEvaluation.SequentialReadTest
 extends PerformanceEvaluation.TableTest
 
 
@@ -249,7 +249,7 @@ extends 
 
 SequentialReadTest
-SequentialReadTest(org.apache.hadoop.hbase.client.Connectioncon,
+SequentialReadTest(org.apache.hadoop.hbase.client.Connectioncon,
PerformanceEvaluation.TestOptionsoptions,
PerformanceEvaluation.Statusstatus)
 
@@ -268,7 +268,7 @@ extends 
 
 testRow
-voidtestRow(inti)
+voidtestRow(inti)
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
index d5d599f..056a44e 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class PerformanceEvaluation.SequentialWriteTest
+static class PerformanceEvaluation.SequentialWriteTest
 extends PerformanceEvaluation.BufferedMutatorTest
 
 
@@ -249,7 +249,7 @@ extends 
 
 SequentialWriteTest
-SequentialWriteTest(org.apache.hadoop.hbase.client.Connectioncon,
+SequentialWriteTest(org.apache.hadoop.hbase.client.Connectioncon,
 PerformanceEvaluation.TestOptionsoptions,
 PerformanceEvaluation.Statusstatus)
 
@@ -268,7 +268,7 @@ extends 
 
 testRow
-voidtestRow(inti)
+voidtestRow(inti)
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
index 02fdcb3..a64ee82 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
+++ 

[49/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/book.html
--
diff --git a/book.html b/book.html
index 2b38a02..b8f6378 100644
--- a/book.html
+++ b/book.html
@@ -37873,7 +37873,7 @@ The server will return cellblocks compressed using this 
same compressor as long
 
 
 Version 3.0.0-SNAPSHOT
-Last updated 2018-05-16 18:31:46 UTC
+Last updated 2018-05-17 14:29:55 UTC
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index c442050..f3a6e17 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
   Bulk Loads in Apache HBase (TM)
@@ -306,7 +306,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-16
+  Last Published: 
2018-05-17
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index a82e052..c930e42 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -284,7 +284,7 @@
 3613
 0
 0
-15865
+15867
 
 Files
 
@@ -4692,7 +4692,7 @@
 org/apache/hadoop/hbase/master/HMaster.java
 0
 0
-160
+162
 
 org/apache/hadoop/hbase/master/HMasterCommandLine.java
 0
@@ -10335,7 +10335,7 @@
 caseIndent: 2
 basicOffset: 2
 lineWrappingIndentation: 2
-4627
+4629
 Error
 
 javadoc
@@ -16964,223 +16964,223 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-258
+259
 
 Error
 indentation
 Indentation
 'method def' child have incorrect indentation level 11, expected level 
should be one of the following: 10, 12.
-306
+307
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 105).
-322
+323
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-427
+428
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-460
+461
 
 Error
 indentation
 Indentation
 'method call rparen' have incorrect indentation level 6, expected level 
should be 4.
-540
+541
 
 Error
 misc
 UpperEll
 Should use uppercase 'L'.
-678
+679
 
 Error
 misc
 UpperEll
 Should use uppercase 'L'.
-678
+679
 
 Error
 misc
 UpperEll
 Should use uppercase 'L'.
-678
+679
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1122
+1123
 
 Error
 blocks
 NeedBraces
 'for' construct must use '{}'s.
-1123
+1124
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1127
+1128
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1136
+1137
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 103).
-1195
+1196
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 106).
-1198
+1199
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1290
+1291
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1292
+1293
 
 Error
 whitespace
 ParenPad
 '(' is followed by whitespace.
-1297
+1298
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1453
+1454
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 102).
-1459
+1460
 
 Error
 coding
 InnerAssignment
 Inner assignments should be avoided.
-1717
+1718
 
 Error
 coding
 InnerAssignment
 Inner assignments should be avoided.
-1764
+1765
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1873
+1874
 
 Error
 misc
 UpperEll
 Should use uppercase 'L'.
-2028
+2029
 
 Error
 misc
 ArrayTypeStyle
 Array brackets at illegal position.
-2148
+2149
 
 Error
 coding
 InnerAssignment
 Inner assignments should be avoided.
-2192
+2193
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 110).
-2233
+2234
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2348
+2349
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2349
+2350
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
-2381
+2382
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 103).
-2427
+2428
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 106).
-2441
+2442
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 118).
-2445
+2446
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 116).
-2449
+2450
 
 Error
 sizes
 MethodLength
 Method length is 279 lines (max allowed is 150).
-2478
+2479
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2618
+2619
 
 Error
 whitespace
 ParenPad
 ')' is preceded with whitespace.
-2647
+2648
 
 

[38/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
index 6d9b2df..3a370e4 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
@@ -1368,7 +1368,7 @@ implements MasterObserver
-postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance,
 postBalanceRSGroup, postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 
 postCompletedModifyTableAction, postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable, postDecommissionRegionServers,
 postDeleteSnapshot,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postListDecommissionedRegionServers,
 postListReplicationPeers,
 postListSnapshot, postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRemoveServers,
 postRequestLock,
 postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetSplitOrMergeEnabled,
 postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSnapshot,
 postTableFlush,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAddRSGroup,
 preBalanceRSGroup,
 preCreateTableAction,
 preDeleteTableAction,
 preDisableTableAction,
 preEnableTableAction,
 preGetClusterMetrics,
 
 preGetTableNames,
 preListNamespaceDescriptors,
 preMasterInitialization,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyTableAction,
 preMoveServers,
 preMoveServersAndTables,
 preMoveTabl
 es, preRemoveRSGroup,
 preRemoveServers,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction, preTruncateTableAction
+postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance,
 postBalanceRSGroup, postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 
 postCompletedModifyTableAction, postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteSnapshot,
 postDisableReplicationPeer, postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postListDecommissionedRegionServers,
 postListReplicationPeers,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction, postModifyNamespace,
 postModifyNamespace,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer, href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#postRegionOffline-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.client.RegionInfo-">postRegionOffline,
 > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#postRemoveReplicationPeer-org.apache.hadoop.hbase.coprocessor.ObserverContext-java.lang.String-">postRemoveReplicationPeer,
 > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#postRemoveRSGroup-org.apache.hadoop.hbase.coprocessor.ObserverContext-java.lang.String-">postRemoveRSGroup,
 > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#postRemoveServers-org.apache.hadoop.hbase.coprocessor.ObserverContext-java.util.Set-">postRemoveServers,
 > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#postRequestLock-org.apache.hadoop.hbase.coprocessor.ObserverC
 
ontext-java.lang.String-org.apache.hadoop.hbase.TableName-org.apache.hadoop.hbase.client.RegionInfo:A-java.lang.String-">postRequestLock,
 postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetSplitOrMergeEnabled,
 postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSnapshot,
 postTableFlush,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAddRSGroup,
 preBalanceRSGroup,
 preCreateTableAction, 

[16/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.html 
b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.html
index 3fd4061..188d432 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.html
@@ -785,7 +785,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 runIncrementalPELoad
-privatevoidrunIncrementalPELoad(org.apache.hadoop.conf.Configurationconf,
+privatevoidrunIncrementalPELoad(org.apache.hadoop.conf.Configurationconf,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.TableInfotableInfo,
   org.apache.hadoop.fs.PathoutDir,
   booleanputSortReducer)
@@ -806,7 +806,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 testSerializeDeserializeFamilyCompressionMap
-publicvoidtestSerializeDeserializeFamilyCompressionMap()
+publicvoidtestSerializeDeserializeFamilyCompressionMap()
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Test for 
HFileOutputFormat2#configureCompression(Configuration, 
HTableDescriptor) and
  HFileOutputFormat2.createFamilyCompressionMap(Configuration).
@@ -824,7 +824,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setupMockColumnFamiliesForCompression
-privatevoidsetupMockColumnFamiliesForCompression(org.apache.hadoop.hbase.client.Tabletable,
+privatevoidsetupMockColumnFamiliesForCompression(org.apache.hadoop.hbase.client.Tabletable,
https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">String,org.apache.hadoop.hbase.io.compress.Compression.AlgorithmfamilyToCompression)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -839,7 +839,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getMockColumnFamiliesForCompression
-privatehttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">String,org.apache.hadoop.hbase.io.compress.Compression.AlgorithmgetMockColumnFamiliesForCompression(intnumCfs)
+privatehttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">String,org.apache.hadoop.hbase.io.compress.Compression.AlgorithmgetMockColumnFamiliesForCompression(intnumCfs)
 
 Returns:
 a map from column family names to compression algorithms for
@@ -853,7 +853,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 testSerializeDeserializeFamilyBloomTypeMap
-publicvoidtestSerializeDeserializeFamilyBloomTypeMap()
+publicvoidtestSerializeDeserializeFamilyBloomTypeMap()
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Test for 
HFileOutputFormat2#configureBloomType(HTableDescriptor, 
Configuration) and
  HFileOutputFormat2.createFamilyBloomTypeMap(Configuration).
@@ -871,7 +871,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setupMockColumnFamiliesForBloomType
-privatevoidsetupMockColumnFamiliesForBloomType(org.apache.hadoop.hbase.client.Tabletable,
+privatevoidsetupMockColumnFamiliesForBloomType(org.apache.hadoop.hbase.client.Tabletable,
  https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">String,org.apache.hadoop.hbase.regionserver.BloomTypefamilyToDataBlockEncoding)
   throws 

[50/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
index 5404ea1..1812a55 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
@@ -35,901 +35,908 @@
 027import java.net.InetSocketAddress;
 028import java.net.URLDecoder;
 029import java.net.URLEncoder;
-030import java.util.ArrayList;
-031import java.util.Arrays;
-032import java.util.List;
-033import java.util.Map;
-034import java.util.Set;
-035import java.util.TreeMap;
-036import java.util.TreeSet;
-037import java.util.UUID;
-038import java.util.function.Function;
-039import java.util.stream.Collectors;
-040
-041import 
org.apache.commons.lang3.StringUtils;
-042import 
org.apache.hadoop.conf.Configuration;
-043import org.apache.hadoop.fs.FileSystem;
-044import org.apache.hadoop.fs.Path;
-045import org.apache.hadoop.hbase.Cell;
-046import 
org.apache.hadoop.hbase.CellComparator;
-047import 
org.apache.hadoop.hbase.CellUtil;
-048import 
org.apache.hadoop.hbase.HConstants;
-049import 
org.apache.hadoop.hbase.HRegionLocation;
-050import 
org.apache.hadoop.hbase.HTableDescriptor;
-051import 
org.apache.hadoop.hbase.KeyValue;
-052import 
org.apache.hadoop.hbase.PrivateCellUtil;
-053import 
org.apache.hadoop.hbase.TableName;
-054import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-055import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-056import 
org.apache.hadoop.hbase.client.Connection;
-057import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-058import 
org.apache.hadoop.hbase.client.Put;
-059import 
org.apache.hadoop.hbase.client.RegionLocator;
-060import 
org.apache.hadoop.hbase.client.Table;
-061import 
org.apache.hadoop.hbase.client.TableDescriptor;
-062import 
org.apache.hadoop.hbase.fs.HFileSystem;
-063import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-064import 
org.apache.hadoop.hbase.io.compress.Compression;
-065import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-066import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-068import 
org.apache.hadoop.hbase.io.hfile.HFile;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-071import 
org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
-072import 
org.apache.hadoop.hbase.regionserver.BloomType;
-073import 
org.apache.hadoop.hbase.regionserver.HStore;
-074import 
org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-075import 
org.apache.hadoop.hbase.util.Bytes;
-076import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-077import 
org.apache.hadoop.hbase.util.FSUtils;
-078import 
org.apache.hadoop.hbase.util.MapReduceExtendedCell;
-079import 
org.apache.hadoop.io.NullWritable;
-080import 
org.apache.hadoop.io.SequenceFile;
-081import org.apache.hadoop.io.Text;
-082import org.apache.hadoop.mapreduce.Job;
-083import 
org.apache.hadoop.mapreduce.OutputCommitter;
-084import 
org.apache.hadoop.mapreduce.OutputFormat;
-085import 
org.apache.hadoop.mapreduce.RecordWriter;
-086import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
-087import 
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-088import 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-089import 
org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
-090import 
org.apache.yetus.audience.InterfaceAudience;
-091import org.slf4j.Logger;
-092import org.slf4j.LoggerFactory;
-093
-094import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-095
-096/**
-097 * Writes HFiles. Passed Cells must 
arrive in order.
-098 * Writes current time as the sequence id 
for the file. Sets the major compacted
-099 * attribute on created @{link {@link 
HFile}s. Calling write(null,null) will forcibly roll
-100 * all HFiles being written.
-101 * p
-102 * Using this class as part of a 
MapReduce job is best done
-103 * using {@link 
#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}.
-104 */
-105@InterfaceAudience.Public
-106public class HFileOutputFormat2
-107extends 
FileOutputFormatImmutableBytesWritable, Cell {
-108  private static final Logger LOG = 
LoggerFactory.getLogger(HFileOutputFormat2.class);
-109  static class TableInfo {
-110private TableDescriptor 
tableDesctiptor;
-111private RegionLocator 
regionLocator;
-112
-113public TableInfo(TableDescriptor 
tableDesctiptor, RegionLocator regionLocator) {
-114  this.tableDesctiptor = 
tableDesctiptor;
-115  this.regionLocator = 
regionLocator;
-116}
-117
-118/**
-119 * The 

[27/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index a97dfdc..2b1b6c6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -2370,1287 +2370,1292 @@
 2362  }
 2363
 2364  @Override
-2365  public long modifyTable(final 
TableName tableName, final TableDescriptor descriptor,
+2365  public long modifyTable(final 
TableName tableName, final TableDescriptor newDescriptor,
 2366  final long nonceGroup, final long 
nonce) throws IOException {
 2367checkInitialized();
-2368
sanityCheckTableDescriptor(descriptor);
+2368
sanityCheckTableDescriptor(newDescriptor);
 2369
 2370return 
MasterProcedureUtil.submitProcedure(
 2371new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
 2372  @Override
 2373  protected void run() throws 
IOException {
-2374
getMaster().getMasterCoprocessorHost().preModifyTable(tableName, descriptor);
-2375
-2376
LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
+2374TableDescriptor oldDescriptor = 
getMaster().getTableDescriptors().get(tableName);
+2375
getMaster().getMasterCoprocessorHost()
+2376  .preModifyTable(tableName, 
oldDescriptor, newDescriptor);
 2377
-2378// Execute the operation 
synchronously - wait for the operation completes before continuing.
-2379//
-2380// We need to wait for the 
procedure to potentially fail due to "prepare" sanity
-2381// checks. This will block only 
the beginning of the procedure. See HBASE-19953.
-2382ProcedurePrepareLatch latch = 
ProcedurePrepareLatch.createBlockingLatch();
-2383submitProcedure(new 
ModifyTableProcedure(procedureExecutor.getEnvironment(),
-2384descriptor, latch));
-2385latch.await();
-2386
-2387
getMaster().getMasterCoprocessorHost().postModifyTable(tableName, 
descriptor);
-2388  }
-2389
-2390  @Override
-2391  protected String getDescription() 
{
-2392return "ModifyTableProcedure";
-2393  }
-2394});
-2395  }
-2396
-2397  public long restoreSnapshot(final 
SnapshotDescription snapshotDesc,
-2398  final long nonceGroup, final long 
nonce, final boolean restoreAcl) throws IOException {
-2399checkInitialized();
-2400
getSnapshotManager().checkSnapshotSupport();
-2401
-2402// Ensure namespace exists. Will 
throw exception if non-known NS.
-2403final TableName dstTable = 
TableName.valueOf(snapshotDesc.getTable());
-2404
getClusterSchema().getNamespace(dstTable.getNamespaceAsString());
-2405
-2406return 
MasterProcedureUtil.submitProcedure(
-2407new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-2408  @Override
-2409  protected void run() throws 
IOException {
-2410  setProcId(
-2411
getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), 
restoreAcl));
-2412  }
-2413
-2414  @Override
-2415  protected String getDescription() 
{
-2416return 
"RestoreSnapshotProcedure";
-2417  }
-2418});
-2419  }
-2420
-2421  private void checkTableExists(final 
TableName tableName)
-2422  throws IOException, 
TableNotFoundException {
-2423if 
(!MetaTableAccessor.tableExists(getConnection(), tableName)) {
-2424  throw new 
TableNotFoundException(tableName);
-2425}
-2426  }
-2427
-2428  @Override
-2429  public void checkTableModifiable(final 
TableName tableName)
-2430  throws IOException, 
TableNotFoundException, TableNotDisabledException {
-2431if (isCatalogTable(tableName)) {
-2432  throw new IOException("Can't 
modify catalog tables");
-2433}
-2434checkTableExists(tableName);
-2435TableState ts = 
getTableStateManager().getTableState(tableName);
-2436if (!ts.isDisabled()) {
-2437  throw new 
TableNotDisabledException("Not DISABLED; " + ts);
-2438}
-2439  }
-2440
-2441  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
-2442return 
getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
-2443  }
-2444
-2445  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor(EnumSetOption options)
-2446  throws InterruptedIOException {
-2447ClusterMetricsBuilder builder = 
ClusterMetricsBuilder.newBuilder();
-2448// given that hbase1 can't submit 
the request with Option,
-2449// we return all information to 
client if the list of Option is empty.
-2450if (options.isEmpty()) {
-2451  options = 
EnumSet.allOf(Option.class);
-2452}
-2453
-2454for (Option opt : options) {
-2455  switch (opt) {

[18/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
index 0653ad2..edce544 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
@@ -386,26 +386,30 @@ implements org.apache.hadoop.util.Tool
 ONE_GB
 
 
+(package private) static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+PE_COMMAND_SHORTNAME
+
+
 private static org.apache.hadoop.fs.Path
 PERF_EVAL_DIR
 
-
+
 (package private) static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 RANDOM_READ
 
-
+
 (package private) static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 RANDOM_SEEK_SCAN
 
-
+
 static int
 ROW_LENGTH
 
-
+
 static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 TABLE_NAME
 
-
+
 private static int
 TAG_LENGTH
 
@@ -545,7 +549,7 @@ implements org.apache.hadoop.util.Tool
 
 
 protected static void
-printUsage(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringclassName,
+printUsage(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringshortName,
   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringmessage)
 
 
@@ -644,13 +648,26 @@ implements org.apache.hadoop.util.Tool
 
 
 
+
+
+
+
+
+PE_COMMAND_SHORTNAME
+static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String PE_COMMAND_SHORTNAME
+
+See Also:
+Constant
 Field Values
+
+
+
 
 
 
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -659,7 +676,7 @@ implements org.apache.hadoop.util.Tool
 
 
 MAPPER
-private static finalcom.fasterxml.jackson.databind.ObjectMapper MAPPER
+private static finalcom.fasterxml.jackson.databind.ObjectMapper MAPPER
 
 
 
@@ -668,7 +685,7 @@ implements org.apache.hadoop.util.Tool
 
 
 TABLE_NAME
-public static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TABLE_NAME
+public static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TABLE_NAME
 
 See Also:
 Constant
 Field Values
@@ -681,7 +698,7 @@ implements org.apache.hadoop.util.Tool
 
 
 FAMILY_NAME_BASE
-public static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FAMILY_NAME_BASE
+public static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FAMILY_NAME_BASE
 
 See Also:
 Constant
 Field Values
@@ -694,7 +711,7 @@ implements org.apache.hadoop.util.Tool
 
 
 FAMILY_ZERO
-public static finalbyte[] FAMILY_ZERO
+public static finalbyte[] FAMILY_ZERO
 
 
 
@@ -703,7 +720,7 @@ implements org.apache.hadoop.util.Tool
 
 
 COLUMN_ZERO
-public static finalbyte[] COLUMN_ZERO
+public static finalbyte[] COLUMN_ZERO
 
 
 
@@ -712,7 +729,7 @@ implements org.apache.hadoop.util.Tool
 
 
 DEFAULT_VALUE_LENGTH
-public static finalint DEFAULT_VALUE_LENGTH
+public static finalint DEFAULT_VALUE_LENGTH
 
 See Also:
 Constant
 Field Values
@@ -725,7 +742,7 @@ implements org.apache.hadoop.util.Tool
 
 
 ROW_LENGTH
-public static finalint ROW_LENGTH
+public static finalint ROW_LENGTH
 
 See Also:
 Constant
 Field Values
@@ -738,7 +755,7 @@ implements org.apache.hadoop.util.Tool
 
 
 ONE_GB
-private static finalint ONE_GB
+private static finalint ONE_GB
 
 See Also:
 Constant
 Field Values
@@ -751,7 +768,7 @@ implements org.apache.hadoop.util.Tool
 
 
 DEFAULT_ROWS_PER_GB
-private static finalint DEFAULT_ROWS_PER_GB
+private static finalint DEFAULT_ROWS_PER_GB
 
 See Also:
 Constant
 Field Values
@@ -764,7 +781,7 @@ implements org.apache.hadoop.util.Tool
 
 
 TAG_LENGTH
-private static finalint TAG_LENGTH
+private static finalint TAG_LENGTH
 
 See Also:
 Constant
 Field Values
@@ -777,7 +794,7 @@ implements org.apache.hadoop.util.Tool
 
 
 FMT
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/text/DecimalFormat.html?is-external=true;
 title="class or interface in java.text">DecimalFormat FMT

[35/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
index c8405ee..7c87df2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
@@ -217,1117 +217,1215 @@
 209   * table RPC call.
 210   * @param ctx the environment to 
interact with the framework and master
 211   * @param tableName the name of the 
table
-212   * @param htd the TableDescriptor
-213   */
-214  default void preModifyTable(final 
ObserverContextMasterCoprocessorEnvironment ctx,
-215  final TableName tableName, 
TableDescriptor htd) throws IOException {}
-216
-217  /**
-218   * Called after the modifyTable 
operation has been requested.  Called as part
-219   * of modify table RPC call.
-220   * @param ctx the environment to 
interact with the framework and master
-221   * @param tableName the name of the 
table
-222   * @param htd the TableDescriptor
-223   */
-224  default void postModifyTable(final 
ObserverContextMasterCoprocessorEnvironment ctx,
-225  final TableName tableName, 
TableDescriptor htd) throws IOException {}
-226
-227  /**
-228   * Called prior to modifying a table's 
properties.  Called as part of modify
-229   * table procedure and it is async to 
the modify table RPC call.
-230   *
-231   * @param ctx the environment to 
interact with the framework and master
-232   * @param tableName the name of the 
table
-233   * @param htd the TableDescriptor
-234   */
-235  default void preModifyTableAction(
-236  final 
ObserverContextMasterCoprocessorEnvironment ctx,
-237  final TableName tableName,
-238  final TableDescriptor htd) throws 
IOException {}
-239
-240  /**
-241   * Called after to modifying a table's 
properties.  Called as part of modify
-242   * table procedure and it is async to 
the modify table RPC call.
-243   *
-244   * @param ctx the environment to 
interact with the framework and master
-245   * @param tableName the name of the 
table
-246   * @param htd the TableDescriptor
-247   */
-248  default void 
postCompletedModifyTableAction(
-249  final 
ObserverContextMasterCoprocessorEnvironment ctx,
-250  final TableName tableName,
-251  final TableDescriptor htd) throws 
IOException {}
-252
-253  /**
-254   * Called prior to enabling a table.  
Called as part of enable table RPC call.
-255   * @param ctx the environment to 
interact with the framework and master
-256   * @param tableName the name of the 
table
-257   */
-258  default void preEnableTable(final 
ObserverContextMasterCoprocessorEnvironment ctx,
-259  final TableName tableName) throws 
IOException {}
-260
-261  /**
-262   * Called after the enableTable 
operation has been requested.  Called as part
-263   * of enable table RPC call.
-264   * @param ctx the environment to 
interact with the framework and master
-265   * @param tableName the name of the 
table
-266   */
-267  default void postEnableTable(final 
ObserverContextMasterCoprocessorEnvironment ctx,
-268  final TableName tableName) throws 
IOException {}
-269
-270  /**
-271   * Called prior to enabling a table.  
Called as part of enable table procedure
-272   * and it is async to the enable table 
RPC call.
-273   *
-274   * @param ctx the environment to 
interact with the framework and master
-275   * @param tableName the name of the 
table
-276   */
-277  default void preEnableTableAction(
-278  final 
ObserverContextMasterCoprocessorEnvironment ctx,
-279  final TableName tableName) throws 
IOException {}
-280
-281  /**
-282   * Called after the enableTable 
operation has been requested.  Called as part
-283   * of enable table procedure and it is 
async to the enable table RPC call.
-284   *
-285   * @param ctx the environment to 
interact with the framework and master
-286   * @param tableName the name of the 
table
-287   */
-288  default void 
postCompletedEnableTableAction(
-289  final 
ObserverContextMasterCoprocessorEnvironment ctx,
-290  final TableName tableName) throws 
IOException {}
-291
-292  /**
-293   * Called prior to disabling a table.  
Called as part of disable table RPC
-294   * call.
+212   * @param newDescriptor after modify 
operation, table will have this descriptor
+213   * @deprecated Since 2.1. Will be 
removed in 3.0.
+214   */
+215  @Deprecated
+216  default void preModifyTable(final 
ObserverContextMasterCoprocessorEnvironment ctx,
+217final TableName tableName, 
TableDescriptor newDescriptor) throws IOException {}
+218
+219  /**
+220   * Called prior to modifying a table's 
properties.  Called as part of modify
+221   * table RPC call.
+222   * @param ctx the 

[43/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
index 6a10c31..3adb296 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HFileOutputFormat2.TableInfo
+static class HFileOutputFormat2.TableInfo
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -218,7 +218,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 tableDesctiptor
-privateTableDescriptor tableDesctiptor
+privateTableDescriptor tableDesctiptor
 
 
 
@@ -227,7 +227,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 regionLocator
-privateRegionLocator regionLocator
+privateRegionLocator regionLocator
 
 
 
@@ -244,7 +244,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TableInfo
-publicTableInfo(TableDescriptortableDesctiptor,
+publicTableInfo(TableDescriptortableDesctiptor,
  RegionLocatorregionLocator)
 
 
@@ -263,7 +263,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 getHTableDescriptor
 https://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
-publicHTableDescriptorgetHTableDescriptor()
+publicHTableDescriptorgetHTableDescriptor()
 Deprecated.use getTableDescriptor()
 The modification for the returned HTD doesn't affect the 
inner TD.
 
@@ -278,7 +278,7 @@ public
 
 getTableDescriptor
-publicTableDescriptorgetTableDescriptor()
+publicTableDescriptorgetTableDescriptor()
 
 
 
@@ -287,7 +287,7 @@ public
 
 getRegionLocator
-publicRegionLocatorgetRegionLocator()
+publicRegionLocatorgetRegionLocator()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
index a558085..7d66d1c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
@@ -107,7 +107,7 @@
 
 
 
-static class HFileOutputFormat2.WriterLength
+static class HFileOutputFormat2.WriterLength
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -188,7 +188,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 written
-long written
+long written
 
 
 
@@ -197,7 +197,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 writer
-StoreFileWriter writer
+StoreFileWriter writer
 
 
 
@@ -214,7 +214,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 WriterLength
-WriterLength()
+WriterLength()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
index e793e5d..d234a60 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
@@ -124,7 +124,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class HFileOutputFormat2
+public class HFileOutputFormat2
 extends org.apache.hadoop.mapreduce.lib.output.FileOutputFormatImmutableBytesWritable,Cell
 Writes HFiles. Passed Cells must arrive in order.
  Writes current time as the sequence id for the file. Sets the major compacted
@@ -457,7 +457,7 @@ extends 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -466,7 +466,7 @@ extends 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
 
 tableSeparator
-protected static finalbyte[] tableSeparator
+protected static finalbyte[] tableSeparator
 
 
 
@@ -475,7 +475,7 @@ extends 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
 
 COMPRESSION_FAMILIES_CONF_KEY
-static 

[21/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
index 4639252..f54d04b 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-abstract static class PerformanceEvaluation.AsyncTest
+abstract static class PerformanceEvaluation.AsyncTest
 extends PerformanceEvaluation.TestBase
 
 
@@ -230,7 +230,7 @@ extends 
 
 connection
-protectedorg.apache.hadoop.hbase.client.AsyncConnection connection
+protectedorg.apache.hadoop.hbase.client.AsyncConnection connection
 
 
 
@@ -247,7 +247,7 @@ extends 
 
 AsyncTest
-AsyncTest(org.apache.hadoop.hbase.client.AsyncConnectioncon,
+AsyncTest(org.apache.hadoop.hbase.client.AsyncConnectioncon,
   PerformanceEvaluation.TestOptionsoptions,
   PerformanceEvaluation.Statusstatus)
 
@@ -266,7 +266,7 @@ extends 
 
 createConnection
-voidcreateConnection()
+voidcreateConnection()
 
 Specified by:
 createConnectionin
 classPerformanceEvaluation.TestBase
@@ -279,7 +279,7 @@ extends 
 
 closeConnection
-voidcloseConnection()
+voidcloseConnection()
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
index a75ee34..ee238e0 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-abstract static class PerformanceEvaluation.BufferedMutatorTest
+abstract static class PerformanceEvaluation.BufferedMutatorTest
 extends PerformanceEvaluation.Test
 
 
@@ -253,7 +253,7 @@ extends 
 
 mutator
-protectedorg.apache.hadoop.hbase.client.BufferedMutator mutator
+protectedorg.apache.hadoop.hbase.client.BufferedMutator mutator
 
 
 
@@ -262,7 +262,7 @@ extends 
 
 table
-protectedorg.apache.hadoop.hbase.client.Table table
+protectedorg.apache.hadoop.hbase.client.Table table
 
 
 
@@ -279,7 +279,7 @@ extends 
 
 BufferedMutatorTest
-BufferedMutatorTest(org.apache.hadoop.hbase.client.Connectioncon,
+BufferedMutatorTest(org.apache.hadoop.hbase.client.Connectioncon,
 PerformanceEvaluation.TestOptionsoptions,
 PerformanceEvaluation.Statusstatus)
 
@@ -298,7 +298,7 @@ extends 
 
 onStartup
-voidonStartup()
+voidonStartup()
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:
@@ -314,7 +314,7 @@ extends 
 
 onTakedown
-voidonTakedown()
+voidonTakedown()
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
index 217e0da..ae27398 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
@@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-abstract static class PerformanceEvaluation.CASTableTest
+abstract static class PerformanceEvaluation.CASTableTest
 extends PerformanceEvaluation.TableTest
 Base class for operations that are CAS-like; that read a 
value and then set it based off what
  they read. In this category is increment, append, checkAndPut, etc.
@@ -278,7 +278,7 @@ extends 
 
 qualifier
-private finalbyte[] qualifier
+private finalbyte[] qualifier
 
 
 
@@ -295,7 +295,7 @@ extends 
 
 CASTableTest
-CASTableTest(org.apache.hadoop.hbase.client.Connectioncon,
+CASTableTest(org.apache.hadoop.hbase.client.Connectioncon,
  PerformanceEvaluation.TestOptionsoptions,

[42/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 52e5608..9412c02 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -1215,7 +1215,7 @@ implements 
 (package private) long
-modifyNamespace(NamespaceDescriptornamespaceDescriptor,
+modifyNamespace(NamespaceDescriptornewNsDescriptor,
longnonceGroup,
longnonce)
 Modify an existing Namespace.
@@ -1224,7 +1224,7 @@ implements 
 long
 modifyTable(TableNametableName,
-   TableDescriptordescriptor,
+   TableDescriptornewDescriptor,
longnonceGroup,
longnonce)
 Modify the descriptor of an existing table
@@ -3258,7 +3258,7 @@ implements 
 modifyTable
 publiclongmodifyTable(TableNametableName,
-TableDescriptordescriptor,
+TableDescriptornewDescriptor,
 longnonceGroup,
 longnonce)
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -3269,7 +3269,7 @@ implements modifyTablein
 interfaceMasterServices
 Parameters:
 tableName - The table name
-descriptor - The updated table descriptor
+newDescriptor - The updated table descriptor
 Throws:
 https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -3281,7 +3281,7 @@ implements 
 
 restoreSnapshot
-publiclongrestoreSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionsnapshotDesc,
+publiclongrestoreSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionsnapshotDesc,
 longnonceGroup,
 longnonce,
 booleanrestoreAcl)
@@ -3298,7 +3298,7 @@ implements 
 
 checkTableExists
-privatevoidcheckTableExists(TableNametableName)
+privatevoidcheckTableExists(TableNametableName)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
   TableNotFoundException
 
@@ -3314,7 +3314,7 @@ implements 
 
 checkTableModifiable
-publicvoidcheckTableModifiable(TableNametableName)
+publicvoidcheckTableModifiable(TableNametableName)
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
  TableNotFoundException,
  TableNotDisabledException
@@ -3338,7 +3338,7 @@ implements 
 
 getClusterMetricsWithoutCoprocessor
-publicClusterMetricsgetClusterMetricsWithoutCoprocessor()
+publicClusterMetricsgetClusterMetricsWithoutCoprocessor()
throws https://docs.oracle.com/javase/8/docs/api/java/io/InterruptedIOException.html?is-external=true;
 title="class or interface in java.io">InterruptedIOException
 
 Throws:
@@ -3352,7 +3352,7 @@ implements 
 
 getClusterMetricsWithoutCoprocessor
-publicClusterMetricsgetClusterMetricsWithoutCoprocessor(https://docs.oracle.com/javase/8/docs/api/java/util/EnumSet.html?is-external=true;
 title="class or interface in java.util">EnumSetClusterMetrics.Optionoptions)
+publicClusterMetricsgetClusterMetricsWithoutCoprocessor(https://docs.oracle.com/javase/8/docs/api/java/util/EnumSet.html?is-external=true;
 title="class or interface in java.util">EnumSetClusterMetrics.Optionoptions)
throws https://docs.oracle.com/javase/8/docs/api/java/io/InterruptedIOException.html?is-external=true;
 title="class or interface in java.io">InterruptedIOException
 
 Throws:
@@ -3366,7 +3366,7 @@ implements 
 
 getClusterMetrics
-publicClusterMetricsgetClusterMetrics()
+publicClusterMetricsgetClusterMetrics()
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Returns:
@@ -3382,7 +3382,7 @@ implements 
 
 getClusterMetrics
-publicClusterMetricsgetClusterMetrics(https://docs.oracle.com/javase/8/docs/api/java/util/EnumSet.html?is-external=true;
 title="class or interface in java.util">EnumSetClusterMetrics.Optionoptions)
+publicClusterMetricsgetClusterMetrics(https://docs.oracle.com/javase/8/docs/api/java/util/EnumSet.html?is-external=true;
 title="class or interface in java.util">EnumSetClusterMetrics.Optionoptions)
  

[34/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/http/log/LogLevel.Servlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/http/log/LogLevel.Servlet.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/http/log/LogLevel.Servlet.html
index a7a3aed..4f83768 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/http/log/LogLevel.Servlet.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/http/log/LogLevel.Servlet.html
@@ -113,76 +113,94 @@
 105  response)) {
 106return;
 107  }
-108
-109  PrintWriter out = 
ServletUtil.initHTML(response, "Log Level");
-110  String logName = 
ServletUtil.getParameter(request, "log");
-111  String level = 
ServletUtil.getParameter(request, "level");
-112
-113  if (logName != null) {
-114out.println("br /hr 
/h3Results/h3");
-115out.println(MARKER
-116+ "Submitted Log Name: 
b" + logName + "/bbr /");
-117
-118Logger log = 
LoggerFactory.getLogger(logName);
+108  
response.setContentType("text/html");
+109  String requestedURL = 
"header.jsp?pageTitle=Log Level";
+110  
request.getRequestDispatcher(requestedURL).include(request, response);
+111  PrintWriter out = 
response.getWriter();
+112  out.println(FORMS);
+113
+114  String logName = 
ServletUtil.getParameter(request, "log");
+115  String level = 
ServletUtil.getParameter(request, "level");
+116
+117  if (logName != null) {
+118
out.println("pResults:/p");
 119out.println(MARKER
-120+ "Log Class: b" + 
log.getClass().getName() +"/bbr /");
-121if (level != null) {
-122  out.println(MARKER + "Submitted 
Level: b" + level + "/bbr /");
-123}
-124
-125if (log instanceof Log4JLogger) 
{
-126  
process(((Log4JLogger)log).getLogger(), level, out);
-127} else if (log instanceof 
Jdk14Logger) {
-128  
process(((Jdk14Logger)log).getLogger(), level, out);
-129} else if (log instanceof 
Log4jLoggerAdapter) {
-130  
process(LogManager.getLogger(logName), level, out);
-131} else {
-132  out.println("Sorry, " + 
log.getClass() + " not supported.br /");
-133}
-134  }
-135
-136  out.println(FORMS);
-137  
out.println(ServletUtil.HTML_TAIL);
-138}
-139
-140static final String FORMS = "\nbr 
/hr /h3Get / Set/h3"
-141+ "\nformLog: input 
type='text' size='50' name='log' / "
-142+ "input type='submit' 
value='Get Log Level' /"
-143+ "/form"
-144+ "\nformLog: input 
type='text' size='50' name='log' / "
-145+ "Level: input type='text' 
name='level' / "
-146+ "input type='submit' 
value='Set Log Level' /"
-147+ "/form";
-148
-149private static void 
process(org.apache.log4j.Logger log, String level,
-150PrintWriter out) throws 
IOException {
-151  if (level != null) {
-152if 
(!level.equals(org.apache.log4j.Level.toLevel(level).toString())) {
-153  out.println(MARKER + "Bad level 
: b" + level + "/bbr /");
-154} else {
-155  
log.setLevel(org.apache.log4j.Level.toLevel(level));
-156  out.println(MARKER + "Setting 
Level to " + level + " ...br /");
-157}
-158  }
-159  out.println(MARKER
-160  + "Effective level: b" 
+ log.getEffectiveLevel() + "/bbr /");
-161}
-162
-163private static void 
process(java.util.logging.Logger log, String level,
-164PrintWriter out) throws 
IOException {
-165  if (level != null) {
-166
log.setLevel(java.util.logging.Level.parse(level));
-167out.println(MARKER + "Setting 
Level to " + level + " ...br /");
-168  }
-169
-170  java.util.logging.Level lev;
-171  for(; (lev = log.getLevel()) == 
null; log = log.getParent());
-172  out.println(MARKER + "Effective 
level: b" + lev + "/bbr /");
-173}
-174  }
-175
-176  private LogLevel() {}
-177}
+120+ "Submitted Log Name: 
b" + logName + "/bbr /");
+121
+122Logger log = 
LoggerFactory.getLogger(logName);
+123out.println(MARKER
+124+ "Log Class: b" + 
log.getClass().getName() +"/bbr /");
+125if (level != null) {
+126  out.println(MARKER + "Submitted 
Level: b" + level + "/bbr /");
+127}
+128
+129if (log instanceof Log4JLogger) 
{
+130  
process(((Log4JLogger)log).getLogger(), level, out);
+131} else if (log instanceof 
Jdk14Logger) {
+132  
process(((Jdk14Logger)log).getLogger(), level, out);
+133} else if (log instanceof 
Log4jLoggerAdapter) {
+134  
process(LogManager.getLogger(logName), level, out);
+135} else {
+136  out.println("Sorry, " + 
log.getClass() + " not supported.br /");
+137}
+138  }
+139  out.println("/div");
+140  

[36/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
index 501bf01..2bfa577 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
@@ -539,7 +539,7 @@ implements MasterObserver
-postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction, postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer, postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures, postGetReplicationPeerConfig,
 postGetTableDescriptors,
 postGetTableNames,
 postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyTable,
 postMove, postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRemoveServers,
 postRequestLock, postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 
 postSetNamespaceQuota, postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota, postSnapshot,
 postTableFlush,
 postTruncateTable,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAbortProcedure,
 preAddReplicationPeer,
 preAddRSGroup,
 preAssign,
 preBalance,
 preBalanceRSGroup,
 preBalanceSwitch,
 preClearDeadServers,
 preCloneSnapshot,
 preCreateNamespace,
 preCreateTable,
 preCreateTableAction,
 preDecommissionRegionServers,
 preDeleteNamespace,
 preDeleteSnapshot,
 preDeleteTable,
 preDeleteTableAction,
 preDisableReplicationPeer,
 preDisableTableAction,
 preEnableReplicationPeer,
 preEnableTable,
 preEnableTableAc
 tion, preGetClusterMetrics,
 preGetLocks,
 preGetNamespaceDescriptor,
 preGetProcedures,
 preGetReplicationPeerConfig,
 preGetTableDescriptors,
 preGetTableNames,
 preListDecommissionedRegionServers,
 preListNamespaceDescriptors,
 preListReplicationPeers, preListSnapshot,
 preLockHeartbeat,
 preMasterInitialization,
 preMergeRegions,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyNamespace,
 preModifyTableAction,
 preMove,
 preMoveServers,
 preMoveServersAndTables,
 preMoveTables,
 preRecommissionRegionServer, preRegionOffline,
 preRemoveReplicationPeer,
 preRemoveRSGroup,
 preRemoveServers,
 preRequestLock,
 preRestoreSnapshot,
 preSetNamespaceQuota,
 preSetSplitOrMergeEnabled,
 preSetTableQuota,
 preSetUserQuota,
 preSetUserQuota,
 preSetUserQuota,
 preShutdown,
 preSnapshot,
 preSplitRegion,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction,
 preStopMaster,
 preTableFlush,
 preTruncateTable,
 preTruncateTableAction, preUnassign,
 preUpdateReplicationPeerConfig
+postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postGetTableDescriptors,
 postGetTableNames, postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyNamespace,
 postModifyTable,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline, postRemoveReplicationPeer,
 postRemoveRSGroup,
 

[44/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
index 1cef255..ba037f8 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
@@ -288,7 +288,7 @@ implements MasterObserver
-postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction, postCreateNamespace,
 postDecommissionRegionServers,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeer
 Config, postGetTableDescriptors,
 postGetTableNames,
 postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsComm
 itAction, postModifyNamespace,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRemoveServers,
 postRequestLock,
 postRestoreSnapshot, postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetSplitOrMerg
 eEnabled, postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota, postSnapshot,
 postStartMaster,
 postTableFlush,
 postTruncateTable,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAbortProcedure,
 preAddReplicationPeer,
 preAddRSGroup, preAssign,
 preBalance,
 preBalanceRSGroup,
 preBalanceSwitch,
 preClearDeadServers,
 preCloneSnapshot,
 preCreateNamespace,
 preCreateTableAction,
 preDecommissionRegionServers,
 preDeleteNamespace,
 preDeleteSnapshot,
 preDeleteTable,
 preDeleteTableAction,
 preDisableReplicationPeer, preDisableTableAction,
 preEnableReplicationPeer,
 preEnableTable,
 preEnableTableAction,
 preGetClusterMetrics,
 preGetLocks,
 preGetNamespaceDescriptor,
 preGetProcedures,
 preGetReplicationPeerConfig,
 preGetTableDescriptors,
 preGetTableNames,
 preListDecommissionedRegionServers,
 preListNamespaceDescriptors,
 preListReplicationPeers,
 preListSnapshot,
 preLockHeartbeat,
 preMasterInitialization,
 preMergeRegions,
 preMergeRegionsAction, preMergeRegionsCommitAction,
 preModifyNamespace,
 preModifyTable,
 preModifyTableAction, preMove,
 preMoveServers,
 preMoveServersAndTables,
 preMoveTables,
 preRecommissionRegionServer,
 preRegionOffline,
 preRemoveReplicationPeer,
 preRemoveRSGroup,
 preRemoveServ
 ers, preRequestLock,
 preRestoreSnapshot,
 preSetNamespaceQuota,
 preSetSplitOrMergeEnabled, preSetTableQuota,
 preSetUserQuota,
 preSetUserQuota,
 preSetUserQuota,
 preShutdown,
 preSnapshot,
 preSplitRegion,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction,
 preStopMaster,
 preTableFlush,
 preTruncateTable, href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preTruncateTableAction-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.TableName-">preTruncateTableAction,
 > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preUnassign-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.client.RegionInfo-boolean-">preUnassign,
 > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preUpdateReplicationPeerConfig-org.apache.hadoop.hbase.coprocessor.ObserverContext-java.lang.String-org.apache.hadoop.hbase.replication.ReplicationPeerConfig-">preUpdateReplicationPeerConfig
+postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postDecommissionRegionServers,
 postDeleteNamespace, postDeleteSnapshot,
 postDeleteTable,
 

[31/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
index 5404ea1..1812a55 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
@@ -35,901 +35,908 @@
 027import java.net.InetSocketAddress;
 028import java.net.URLDecoder;
 029import java.net.URLEncoder;
-030import java.util.ArrayList;
-031import java.util.Arrays;
-032import java.util.List;
-033import java.util.Map;
-034import java.util.Set;
-035import java.util.TreeMap;
-036import java.util.TreeSet;
-037import java.util.UUID;
-038import java.util.function.Function;
-039import java.util.stream.Collectors;
-040
-041import 
org.apache.commons.lang3.StringUtils;
-042import 
org.apache.hadoop.conf.Configuration;
-043import org.apache.hadoop.fs.FileSystem;
-044import org.apache.hadoop.fs.Path;
-045import org.apache.hadoop.hbase.Cell;
-046import 
org.apache.hadoop.hbase.CellComparator;
-047import 
org.apache.hadoop.hbase.CellUtil;
-048import 
org.apache.hadoop.hbase.HConstants;
-049import 
org.apache.hadoop.hbase.HRegionLocation;
-050import 
org.apache.hadoop.hbase.HTableDescriptor;
-051import 
org.apache.hadoop.hbase.KeyValue;
-052import 
org.apache.hadoop.hbase.PrivateCellUtil;
-053import 
org.apache.hadoop.hbase.TableName;
-054import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-055import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-056import 
org.apache.hadoop.hbase.client.Connection;
-057import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-058import 
org.apache.hadoop.hbase.client.Put;
-059import 
org.apache.hadoop.hbase.client.RegionLocator;
-060import 
org.apache.hadoop.hbase.client.Table;
-061import 
org.apache.hadoop.hbase.client.TableDescriptor;
-062import 
org.apache.hadoop.hbase.fs.HFileSystem;
-063import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-064import 
org.apache.hadoop.hbase.io.compress.Compression;
-065import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-066import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-068import 
org.apache.hadoop.hbase.io.hfile.HFile;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-071import 
org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
-072import 
org.apache.hadoop.hbase.regionserver.BloomType;
-073import 
org.apache.hadoop.hbase.regionserver.HStore;
-074import 
org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-075import 
org.apache.hadoop.hbase.util.Bytes;
-076import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-077import 
org.apache.hadoop.hbase.util.FSUtils;
-078import 
org.apache.hadoop.hbase.util.MapReduceExtendedCell;
-079import 
org.apache.hadoop.io.NullWritable;
-080import 
org.apache.hadoop.io.SequenceFile;
-081import org.apache.hadoop.io.Text;
-082import org.apache.hadoop.mapreduce.Job;
-083import 
org.apache.hadoop.mapreduce.OutputCommitter;
-084import 
org.apache.hadoop.mapreduce.OutputFormat;
-085import 
org.apache.hadoop.mapreduce.RecordWriter;
-086import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
-087import 
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-088import 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-089import 
org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
-090import 
org.apache.yetus.audience.InterfaceAudience;
-091import org.slf4j.Logger;
-092import org.slf4j.LoggerFactory;
-093
-094import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-095
-096/**
-097 * Writes HFiles. Passed Cells must 
arrive in order.
-098 * Writes current time as the sequence id 
for the file. Sets the major compacted
-099 * attribute on created @{link {@link 
HFile}s. Calling write(null,null) will forcibly roll
-100 * all HFiles being written.
-101 * p
-102 * Using this class as part of a 
MapReduce job is best done
-103 * using {@link 
#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}.
-104 */
-105@InterfaceAudience.Public
-106public class HFileOutputFormat2
-107extends 
FileOutputFormatImmutableBytesWritable, Cell {
-108  private static final Logger LOG = 
LoggerFactory.getLogger(HFileOutputFormat2.class);
-109  static class TableInfo {
-110private TableDescriptor 
tableDesctiptor;
-111private RegionLocator 
regionLocator;
-112
-113public TableInfo(TableDescriptor 
tableDesctiptor, RegionLocator regionLocator) {
-114  this.tableDesctiptor = 
tableDesctiptor;
-115  this.regionLocator = 
regionLocator;
-116}
-117
-118/**

[33/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
index 5404ea1..1812a55 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
@@ -35,901 +35,908 @@
 027import java.net.InetSocketAddress;
 028import java.net.URLDecoder;
 029import java.net.URLEncoder;
-030import java.util.ArrayList;
-031import java.util.Arrays;
-032import java.util.List;
-033import java.util.Map;
-034import java.util.Set;
-035import java.util.TreeMap;
-036import java.util.TreeSet;
-037import java.util.UUID;
-038import java.util.function.Function;
-039import java.util.stream.Collectors;
-040
-041import 
org.apache.commons.lang3.StringUtils;
-042import 
org.apache.hadoop.conf.Configuration;
-043import org.apache.hadoop.fs.FileSystem;
-044import org.apache.hadoop.fs.Path;
-045import org.apache.hadoop.hbase.Cell;
-046import 
org.apache.hadoop.hbase.CellComparator;
-047import 
org.apache.hadoop.hbase.CellUtil;
-048import 
org.apache.hadoop.hbase.HConstants;
-049import 
org.apache.hadoop.hbase.HRegionLocation;
-050import 
org.apache.hadoop.hbase.HTableDescriptor;
-051import 
org.apache.hadoop.hbase.KeyValue;
-052import 
org.apache.hadoop.hbase.PrivateCellUtil;
-053import 
org.apache.hadoop.hbase.TableName;
-054import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-055import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-056import 
org.apache.hadoop.hbase.client.Connection;
-057import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-058import 
org.apache.hadoop.hbase.client.Put;
-059import 
org.apache.hadoop.hbase.client.RegionLocator;
-060import 
org.apache.hadoop.hbase.client.Table;
-061import 
org.apache.hadoop.hbase.client.TableDescriptor;
-062import 
org.apache.hadoop.hbase.fs.HFileSystem;
-063import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-064import 
org.apache.hadoop.hbase.io.compress.Compression;
-065import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-066import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-068import 
org.apache.hadoop.hbase.io.hfile.HFile;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-071import 
org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
-072import 
org.apache.hadoop.hbase.regionserver.BloomType;
-073import 
org.apache.hadoop.hbase.regionserver.HStore;
-074import 
org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-075import 
org.apache.hadoop.hbase.util.Bytes;
-076import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-077import 
org.apache.hadoop.hbase.util.FSUtils;
-078import 
org.apache.hadoop.hbase.util.MapReduceExtendedCell;
-079import 
org.apache.hadoop.io.NullWritable;
-080import 
org.apache.hadoop.io.SequenceFile;
-081import org.apache.hadoop.io.Text;
-082import org.apache.hadoop.mapreduce.Job;
-083import 
org.apache.hadoop.mapreduce.OutputCommitter;
-084import 
org.apache.hadoop.mapreduce.OutputFormat;
-085import 
org.apache.hadoop.mapreduce.RecordWriter;
-086import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
-087import 
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-088import 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-089import 
org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
-090import 
org.apache.yetus.audience.InterfaceAudience;
-091import org.slf4j.Logger;
-092import org.slf4j.LoggerFactory;
-093
-094import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-095
-096/**
-097 * Writes HFiles. Passed Cells must 
arrive in order.
-098 * Writes current time as the sequence id 
for the file. Sets the major compacted
-099 * attribute on created @{link {@link 
HFile}s. Calling write(null,null) will forcibly roll
-100 * all HFiles being written.
-101 * p
-102 * Using this class as part of a 
MapReduce job is best done
-103 * using {@link 
#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}.
-104 */
-105@InterfaceAudience.Public
-106public class HFileOutputFormat2
-107extends 
FileOutputFormatImmutableBytesWritable, Cell {
-108  private static final Logger LOG = 
LoggerFactory.getLogger(HFileOutputFormat2.class);
-109  static class TableInfo {
-110private TableDescriptor 
tableDesctiptor;
-111private RegionLocator 
regionLocator;
-112
-113public TableInfo(TableDescriptor 
tableDesctiptor, RegionLocator regionLocator) {
-114  this.tableDesctiptor = 
tableDesctiptor;
-115  

[51/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
Published site at cf529f18a9959589fa635f78df4840472526ea2c.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/7bcc960d
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/7bcc960d
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/7bcc960d

Branch: refs/heads/asf-site
Commit: 7bcc960d3b97851edc15f3007d79641917fcbd66
Parents: 74b8701
Author: jenkins 
Authored: Thu May 17 14:47:41 2018 +
Committer: jenkins 
Committed: Thu May 17 14:47:41 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 .../hbase/mapreduce/HFileOutputFormat2.html |   26 +-
 .../hbase/mapreduce/HFileOutputFormat2.html | 1777 +++---
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   |  318 +-
 checkstyle.rss  |4 +-
 coc.html|4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/constant-values.html |8 +-
 devapidocs/deprecated-list.html |   38 +-
 devapidocs/index-all.html   |   48 +-
 .../hadoop/hbase/backup/package-tree.html   |4 +-
 .../hbase/class-use/NamespaceDescriptor.html|   36 +-
 .../hadoop/hbase/class-use/TableName.html   |   86 +-
 .../hbase/client/class-use/TableDescriptor.html |   80 +-
 .../hadoop/hbase/client/package-tree.html   |   26 +-
 .../hbase/coprocessor/MasterObserver.html   |  758 ++-
 .../class-use/MasterCoprocessorEnvironment.html |  262 +-
 .../coprocessor/class-use/ObserverContext.html  |  370 +-
 .../ExampleMasterObserverWithMetrics.html   |2 +-
 .../hadoop/hbase/filter/package-tree.html   |   10 +-
 .../hadoop/hbase/http/log/LogLevel.Servlet.html |6 +-
 .../apache/hadoop/hbase/http/log/LogLevel.html  |2 +-
 .../hadoop/hbase/io/hfile/CacheStats.html   |   28 +-
 .../hadoop/hbase/io/hfile/package-tree.html |6 +-
 .../mapreduce/HFileOutputFormat2.TableInfo.html |   14 +-
 .../HFileOutputFormat2.WriterLength.html|8 +-
 .../hbase/mapreduce/HFileOutputFormat2.html |   74 +-
 .../hadoop/hbase/mapreduce/package-tree.html|2 +-
 .../master/HMaster.MasterStoppedException.html  |4 +-
 .../org/apache/hadoop/hbase/master/HMaster.html |  208 +-
 .../hbase/master/MasterCoprocessorHost.html |  322 +-
 .../hadoop/hbase/master/package-tree.html   |6 +-
 .../master/procedure/ModifyTableProcedure.html  |2 +-
 .../hbase/master/procedure/package-tree.html|2 +-
 .../hadoop/hbase/monitoring/package-tree.html   |2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   16 +-
 .../hadoop/hbase/procedure2/package-tree.html   |4 +-
 .../hbase/quotas/MasterSpaceQuotaObserver.html  |2 +-
 .../hadoop/hbase/quotas/package-tree.html   |   10 +-
 .../hadoop/hbase/regionserver/package-tree.html |   20 +-
 .../regionserver/querymatcher/package-tree.html |2 +-
 .../hbase/regionserver/wal/package-tree.html|2 +-
 .../replication/regionserver/package-tree.html  |2 +-
 .../hbase/rsgroup/RSGroupAdminEndpoint.html |4 +-
 .../hbase/security/access/AccessController.html |8 +-
 .../CoprocessorWhitelistMasterObserver.html |4 +-
 .../hbase/security/access/package-tree.html |2 +-
 .../hadoop/hbase/security/class-use/User.html   |   10 +-
 .../visibility/VisibilityController.html|4 +-
 .../hadoop/hbase/thrift/package-tree.html   |2 +-
 .../org/apache/hadoop/hbase/util/JSONBean.html  |2 +-
 .../apache/hadoop/hbase/util/package-tree.html  |8 +-
 .../org/apache/hadoop/hbase/Version.html|6 +-
 .../hbase/coprocessor/MasterObserver.html   | 2210 +++
 .../hadoop/hbase/http/log/LogLevel.Servlet.html |  156 +-
 .../apache/hadoop/hbase/http/log/LogLevel.html  |  156 +-
 .../hadoop/hbase/io/hfile/CacheStats.html   |  164 +-
 .../mapreduce/HFileOutputFormat2.TableInfo.html | 1777 +++---
 .../HFileOutputFormat2.WriterLength.html| 1777 +++---
 .../hbase/mapreduce/HFileOutputFormat2.html | 1777 +++---
 .../master/HMaster.InitializationMonitor.html   | 2521 
 .../master/HMaster.MasterStoppedException.html  | 2521 
 .../hbase/master/HMaster.RedirectServlet.html   | 2521 
 .../org/apache/hadoop/hbase/master/HMaster.html | 2521 
 ...MasterCoprocessorHost.MasterEnvironment.html | 2812 -
 ...st.MasterEnvironmentForCoreCoprocessors.html | 2812 -
 

[40/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
index 3d42576..6d426da 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
@@ -346,11 +346,11 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
+org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
 org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.master.RegionState.State
 org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode
-org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
+org.apache.hadoop.hbase.master.RegionState.State
+org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
index 9883bd0..1edc39a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
@@ -958,7 +958,7 @@ extends 
 
 getRegionInfoList
-privatehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfogetRegionInfoList(MasterProcedureEnvenv)
+privatehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfogetRegionInfoList(MasterProcedureEnvenv)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index 0fa081d..a7119b7 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -208,9 +208,9 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
 org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
 org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
+org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
index 9eb46aa..3e1ea7f 100644
--- a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
@@ -125,8 +125,8 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface 

hbase-site git commit: INFRA-10751 Empty commit

2018-05-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 7bcc960d3 -> 4a5d182c5


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/4a5d182c
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/4a5d182c
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/4a5d182c

Branch: refs/heads/asf-site
Commit: 4a5d182c5f0dfb779003ddc88f39b45e249ff4b6
Parents: 7bcc960
Author: jenkins 
Authored: Thu May 17 14:48:04 2018 +
Committer: jenkins 
Committed: Thu May 17 14:48:04 2018 +

--

--




[37/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
 
b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
index 9799d8b..c2f510f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
@@ -254,7 +254,7 @@ implements MasterObserver
-postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction, postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer, postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures, postGetReplicationPeerConfig,
 postGetTableDescriptors,
 postGetTableNames,
 postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyTable,
 postMove, postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRemoveServers,
 postRequestLock, postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 
 postSetNamespaceQuota, postSetSplitOrMergeEnabled,
 postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota, postSetUserQuota,
 postSnapshot,
 postStartMaster,
 postTableFlush,
 postTruncateTable,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAbortProcedure,
 preAddReplicationPeer,
 preAddRSGroup,
 preAssign,
 preBalance,
 preBalanceRSGroup,
 preBalanceSwitch, preClearDeadServers,
 preCloneSnapshot,
 preCreateNamespace,
 preCreateTableAction, preDecommissionRegionServers,
 preDeleteNamespace,
 preDeleteSnapshot,
 preDeleteTable,
 preDeleteTableAction,
 preDisableReplicationPeer,
 preDisableTable,
 preDisableTableAction,
 preEnableReplicationPeer,
 preEnableTable,
 preEnableTableAction,
 preGetClusterMetrics,
 preGetLocks,
 preGetNamespaceDescriptor,
 preGetProcedures,
 preGetReplicationPeerConfig,
 preGetTableDescriptors,
 preGetTableNames,
 preListDecommissionedRegionServers,
 preListNamespaceDescriptors,
 preListReplicationPeers,
 preListSnapshot,
 preLockHeartbeat,
 preMasterInitialization, preMergeRegions,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyNamespace,
 preModifyTableAction,
 preMove,
 preMoveServers,
 preMoveServersAndTables,
 preMoveTables,
 preRecommissionRegionServer,
 preRegionOffline,
 preRemoveReplicationPeer,
 preRemoveRSGroup, preRemoveServers,
 preRequestLock,
 preRestoreSnapshot,
 preSetNamespaceQuota, preSetSplitOrMergeEnabled,
 preSetTableQuota,
 preSetUserQuota,
 preSetUserQuota, preSetUserQuota,
 preShutdown,
 preSnapshot,
 preSplitRegion,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction,
 preStopMaster,
 preTab
 leFlush, preTruncateTable,
 preTruncateTableAction,
 preUnassign,
 preUpdateReplicationPeerConfig
+postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteNamespace,
 postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postGetTableDescriptors,
 postGetTableNames, postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyNamespace,
 postModifyTable,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 

[28/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index a97dfdc..2b1b6c6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -2370,1287 +2370,1292 @@
 2362  }
 2363
 2364  @Override
-2365  public long modifyTable(final 
TableName tableName, final TableDescriptor descriptor,
+2365  public long modifyTable(final 
TableName tableName, final TableDescriptor newDescriptor,
 2366  final long nonceGroup, final long 
nonce) throws IOException {
 2367checkInitialized();
-2368
sanityCheckTableDescriptor(descriptor);
+2368
sanityCheckTableDescriptor(newDescriptor);
 2369
 2370return 
MasterProcedureUtil.submitProcedure(
 2371new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
 2372  @Override
 2373  protected void run() throws 
IOException {
-2374
getMaster().getMasterCoprocessorHost().preModifyTable(tableName, descriptor);
-2375
-2376
LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
+2374TableDescriptor oldDescriptor = 
getMaster().getTableDescriptors().get(tableName);
+2375
getMaster().getMasterCoprocessorHost()
+2376  .preModifyTable(tableName, 
oldDescriptor, newDescriptor);
 2377
-2378// Execute the operation 
synchronously - wait for the operation completes before continuing.
-2379//
-2380// We need to wait for the 
procedure to potentially fail due to "prepare" sanity
-2381// checks. This will block only 
the beginning of the procedure. See HBASE-19953.
-2382ProcedurePrepareLatch latch = 
ProcedurePrepareLatch.createBlockingLatch();
-2383submitProcedure(new 
ModifyTableProcedure(procedureExecutor.getEnvironment(),
-2384descriptor, latch));
-2385latch.await();
-2386
-2387
getMaster().getMasterCoprocessorHost().postModifyTable(tableName, 
descriptor);
-2388  }
-2389
-2390  @Override
-2391  protected String getDescription() 
{
-2392return "ModifyTableProcedure";
-2393  }
-2394});
-2395  }
-2396
-2397  public long restoreSnapshot(final 
SnapshotDescription snapshotDesc,
-2398  final long nonceGroup, final long 
nonce, final boolean restoreAcl) throws IOException {
-2399checkInitialized();
-2400
getSnapshotManager().checkSnapshotSupport();
-2401
-2402// Ensure namespace exists. Will 
throw exception if non-known NS.
-2403final TableName dstTable = 
TableName.valueOf(snapshotDesc.getTable());
-2404
getClusterSchema().getNamespace(dstTable.getNamespaceAsString());
-2405
-2406return 
MasterProcedureUtil.submitProcedure(
-2407new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-2408  @Override
-2409  protected void run() throws 
IOException {
-2410  setProcId(
-2411
getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), 
restoreAcl));
-2412  }
-2413
-2414  @Override
-2415  protected String getDescription() 
{
-2416return 
"RestoreSnapshotProcedure";
-2417  }
-2418});
-2419  }
-2420
-2421  private void checkTableExists(final 
TableName tableName)
-2422  throws IOException, 
TableNotFoundException {
-2423if 
(!MetaTableAccessor.tableExists(getConnection(), tableName)) {
-2424  throw new 
TableNotFoundException(tableName);
-2425}
-2426  }
-2427
-2428  @Override
-2429  public void checkTableModifiable(final 
TableName tableName)
-2430  throws IOException, 
TableNotFoundException, TableNotDisabledException {
-2431if (isCatalogTable(tableName)) {
-2432  throw new IOException("Can't 
modify catalog tables");
-2433}
-2434checkTableExists(tableName);
-2435TableState ts = 
getTableStateManager().getTableState(tableName);
-2436if (!ts.isDisabled()) {
-2437  throw new 
TableNotDisabledException("Not DISABLED; " + ts);
-2438}
-2439  }
-2440
-2441  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
-2442return 
getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
-2443  }
-2444
-2445  public ClusterMetrics 
getClusterMetricsWithoutCoprocessor(EnumSetOption options)
-2446  throws InterruptedIOException {
-2447ClusterMetricsBuilder builder = 
ClusterMetricsBuilder.newBuilder();
-2448// given that hbase1 can't submit 
the request with Option,
-2449// we return all information to 
client if the list of Option is empty.
-2450if (options.isEmpty()) {
-2451  options = 
EnumSet.allOf(Option.class);

[48/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/class-use/NamespaceDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/NamespaceDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/NamespaceDescriptor.html
index 39db3f3..911d72d 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/NamespaceDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/NamespaceDescriptor.html
@@ -404,22 +404,42 @@
 
 default void
 MasterObserver.postModifyNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
-   NamespaceDescriptorns)
-Called after the modifyNamespace operation has been 
requested.
+   NamespaceDescriptorcurrentNsDescriptor)
+Deprecated.
+Since 2.1. Will be removed 
in 3.0.
+
 
 
 
 default void
+MasterObserver.postModifyNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
+   NamespaceDescriptoroldNsDescriptor,
+   NamespaceDescriptorcurrentNsDescriptor)
+Called after the modifyNamespace operation has been 
requested.
+
+
+
+default void
 MasterObserver.preCreateNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
   NamespaceDescriptorns)
 Called before a new namespace is created by
  HMaster.
 
 
-
+
 default void
 MasterObserver.preModifyNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
-  NamespaceDescriptorns)
+  NamespaceDescriptornewNsDescriptor)
+Deprecated.
+Since 2.1. Will be removed 
in 3.0.
+
+
+
+
+default void
+MasterObserver.preModifyNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
+  NamespaceDescriptorcurrentNsDescriptor,
+  NamespaceDescriptornewNsDescriptor)
 Called prior to modifying a namespace's properties.
 
 
@@ -578,7 +598,7 @@
 
 
 (package private) long
-HMaster.modifyNamespace(NamespaceDescriptornamespaceDescriptor,
+HMaster.modifyNamespace(NamespaceDescriptornewNsDescriptor,
longnonceGroup,
longnonce)
 Modify an existing Namespace.
@@ -608,7 +628,8 @@
 
 
 void
-MasterCoprocessorHost.postModifyNamespace(NamespaceDescriptorns)
+MasterCoprocessorHost.postModifyNamespace(NamespaceDescriptoroldNsDescriptor,
+   NamespaceDescriptorcurrentNsDescriptor)
 
 
 void
@@ -616,7 +637,8 @@
 
 
 void
-MasterCoprocessorHost.preModifyNamespace(NamespaceDescriptorns)
+MasterCoprocessorHost.preModifyNamespace(NamespaceDescriptorcurrentNsDescriptor,
+  NamespaceDescriptornewNsDescriptor)
 
 
 void

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index ad1b394..43221ce 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -5463,44 +5463,66 @@ service.
 default void
 MasterObserver.postCompletedModifyTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
   TableNametableName,
-  TableDescriptorhtd)
-Called after to modifying a table's properties.
+  TableDescriptorcurrentDescriptor)
+Deprecated.
+Since 2.1. Will be removed 
in 3.0.
+
 
 
 
 default void
+MasterObserver.postCompletedModifyTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
+  TableNametableName,
+  TableDescriptoroldDescriptor,
+  TableDescriptorcurrentDescriptor)
+Called after to modifying a table's properties.
+
+
+
+default void
 MasterObserver.postCompletedTruncateTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
 TableNametableName)
 Called after HMaster truncates a
  table.
 
 
-
+
 default void
 MasterObserver.postDeleteTable(ObserverContextMasterCoprocessorEnvironmentctx,
TableNametableName)
 Called after the deleteTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postDisableTable(ObserverContextMasterCoprocessorEnvironmentctx,
 TableNametableName)
 Called after the disableTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postEnableTable(ObserverContextMasterCoprocessorEnvironmentctx,
TableNametableName)
 Called after the enableTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postModifyTable(ObserverContextMasterCoprocessorEnvironmentctx,
TableNametableName,
-   TableDescriptorhtd)
+   TableDescriptorcurrentDescriptor)
+Deprecated.
+Since 2.1. Will be removed 
in 3.0.
+
+
+
+

[41/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
index 51a2cee..08907ec 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
@@ -316,8 +316,9 @@ extends 
 void
-postCompletedModifyTableAction(TableNametableName,
-  TableDescriptorhtd,
+postCompletedModifyTableAction(TableNametableName,
+  TableDescriptoroldDescriptor,
+  TableDescriptorcurrentDescriptor,
   Useruser)
 
 
@@ -441,12 +442,14 @@ extends 
 void
-postModifyNamespace(NamespaceDescriptorns)
+postModifyNamespace(NamespaceDescriptoroldNsDescriptor,
+   NamespaceDescriptorcurrentNsDescriptor)
 
 
 void
-postModifyTable(TableNametableName,
-   TableDescriptorhtd)
+postModifyTable(TableNametableName,
+   TableDescriptoroldDescriptor,
+   TableDescriptorcurrentDescriptor)
 
 
 void
@@ -755,17 +758,20 @@ extends 
 void
-preModifyNamespace(NamespaceDescriptorns)
+preModifyNamespace(NamespaceDescriptorcurrentNsDescriptor,
+  NamespaceDescriptornewNsDescriptor)
 
 
 void
-preModifyTable(TableNametableName,
-  TableDescriptorhtd)
+preModifyTable(TableNametableName,
+  TableDescriptorcurrentDescriptor,
+  TableDescriptornewDescriptor)
 
 
 void
-preModifyTableAction(TableNametableName,
-TableDescriptorhtd,
+preModifyTableAction(TableNametableName,
+TableDescriptorcurrentDescriptor,
+TableDescriptornewDescriptor,
 Useruser)
 
 
@@ -1101,13 +1107,14 @@ extends 
+
 
 
 
 
 preModifyNamespace
-publicvoidpreModifyNamespace(NamespaceDescriptorns)
+publicvoidpreModifyNamespace(NamespaceDescriptorcurrentNsDescriptor,
+   NamespaceDescriptornewNsDescriptor)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1115,13 +1122,14 @@ extends 
+
 
 
 
 
 postModifyNamespace
-publicvoidpostModifyNamespace(NamespaceDescriptorns)
+publicvoidpostModifyNamespace(NamespaceDescriptoroldNsDescriptor,
+NamespaceDescriptorcurrentNsDescriptor)
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1135,7 +1143,7 @@ extends 
 
 preGetNamespaceDescriptor
-publicvoidpreGetNamespaceDescriptor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringnamespaceName)
+publicvoidpreGetNamespaceDescriptor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringnamespaceName)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1149,7 +1157,7 @@ extends 
 
 postGetNamespaceDescriptor
-publicvoidpostGetNamespaceDescriptor(NamespaceDescriptorns)
+publicvoidpostGetNamespaceDescriptor(NamespaceDescriptorns)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1163,7 +1171,7 @@ extends 
 
 preListNamespaceDescriptors
-publicvoidpreListNamespaceDescriptors(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListNamespaceDescriptordescriptors)
+publicvoidpreListNamespaceDescriptors(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListNamespaceDescriptordescriptors)
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1177,7 +1185,7 @@ extends 
 
 postListNamespaceDescriptors
-publicvoidpostListNamespaceDescriptors(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListNamespaceDescriptordescriptors)
+publicvoidpostListNamespaceDescriptors(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListNamespaceDescriptordescriptors)
   throws 

[04/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201

[26/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
index 8b2674f..274eb54 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
@@ -262,1413 +262,1417 @@
 254});
 255  }
 256
-257  public void preModifyNamespace(final 
NamespaceDescriptor ns) throws IOException {
-258
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-259  @Override
-260  public void call(MasterObserver 
observer) throws IOException {
-261observer.preModifyNamespace(this, 
ns);
-262  }
-263});
-264  }
-265
-266  public void postModifyNamespace(final 
NamespaceDescriptor ns) throws IOException {
-267
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-268  @Override
-269  public void call(MasterObserver 
observer) throws IOException {
-270
observer.postModifyNamespace(this, ns);
-271  }
-272});
-273  }
-274
-275  public void 
preGetNamespaceDescriptor(final String namespaceName)
-276  throws IOException {
-277
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-278  @Override
-279  public void call(MasterObserver 
observer) throws IOException {
-280
observer.preGetNamespaceDescriptor(this, namespaceName);
-281  }
-282});
-283  }
-284
-285  public void 
postGetNamespaceDescriptor(final NamespaceDescriptor ns)
-286  throws IOException {
-287
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-288  @Override
-289  public void call(MasterObserver 
observer) throws IOException {
-290
observer.postGetNamespaceDescriptor(this, ns);
-291  }
-292});
-293  }
-294
-295  public void 
preListNamespaceDescriptors(final ListNamespaceDescriptor 
descriptors)
-296  throws IOException {
-297
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-298  @Override
-299  public void call(MasterObserver 
observer) throws IOException {
-300
observer.preListNamespaceDescriptors(this, descriptors);
-301  }
-302});
-303  }
-304
-305  public void 
postListNamespaceDescriptors(final ListNamespaceDescriptor 
descriptors)
-306  throws IOException {
-307
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-308  @Override
-309  public void call(MasterObserver 
observer) throws IOException {
-310
observer.postListNamespaceDescriptors(this, descriptors);
-311  }
-312});
-313  }
-314
-315  /* Implementation of hooks for invoking 
MasterObservers */
+257  public void preModifyNamespace(final 
NamespaceDescriptor currentNsDescriptor,
+258final NamespaceDescriptor 
newNsDescriptor) throws IOException {
+259
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+260  @Override
+261  public void call(MasterObserver 
observer) throws IOException {
+262observer.preModifyNamespace(this, 
currentNsDescriptor, newNsDescriptor);
+263  }
+264});
+265  }
+266
+267  public void postModifyNamespace(final 
NamespaceDescriptor oldNsDescriptor,
+268final NamespaceDescriptor 
currentNsDescriptor) throws IOException {
+269
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+270  @Override
+271  public void call(MasterObserver 
observer) throws IOException {
+272
observer.postModifyNamespace(this, oldNsDescriptor, currentNsDescriptor);
+273  }
+274});
+275  }
+276
+277  public void 
preGetNamespaceDescriptor(final String namespaceName)
+278  throws IOException {
+279
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+280  @Override
+281  public void call(MasterObserver 
observer) throws IOException {
+282
observer.preGetNamespaceDescriptor(this, namespaceName);
+283  }
+284});
+285  }
+286
+287  public void 
postGetNamespaceDescriptor(final NamespaceDescriptor ns)
+288  throws IOException {
+289
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+290  @Override
+291  public void call(MasterObserver 
observer) throws IOException {
+292
observer.postGetNamespaceDescriptor(this, ns);
+293  }
+294});
+295  }
+296
+297  public void 
preListNamespaceDescriptors(final ListNamespaceDescriptor 
descriptors)
+298  throws 

[10/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");

[13/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201

[25/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
index 8b2674f..274eb54 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
@@ -262,1413 +262,1417 @@
 254});
 255  }
 256
-257  public void preModifyNamespace(final 
NamespaceDescriptor ns) throws IOException {
-258
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-259  @Override
-260  public void call(MasterObserver 
observer) throws IOException {
-261observer.preModifyNamespace(this, 
ns);
-262  }
-263});
-264  }
-265
-266  public void postModifyNamespace(final 
NamespaceDescriptor ns) throws IOException {
-267
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-268  @Override
-269  public void call(MasterObserver 
observer) throws IOException {
-270
observer.postModifyNamespace(this, ns);
-271  }
-272});
-273  }
-274
-275  public void 
preGetNamespaceDescriptor(final String namespaceName)
-276  throws IOException {
-277
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-278  @Override
-279  public void call(MasterObserver 
observer) throws IOException {
-280
observer.preGetNamespaceDescriptor(this, namespaceName);
-281  }
-282});
-283  }
-284
-285  public void 
postGetNamespaceDescriptor(final NamespaceDescriptor ns)
-286  throws IOException {
-287
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-288  @Override
-289  public void call(MasterObserver 
observer) throws IOException {
-290
observer.postGetNamespaceDescriptor(this, ns);
-291  }
-292});
-293  }
-294
-295  public void 
preListNamespaceDescriptors(final ListNamespaceDescriptor 
descriptors)
-296  throws IOException {
-297
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-298  @Override
-299  public void call(MasterObserver 
observer) throws IOException {
-300
observer.preListNamespaceDescriptors(this, descriptors);
-301  }
-302});
-303  }
-304
-305  public void 
postListNamespaceDescriptors(final ListNamespaceDescriptor 
descriptors)
-306  throws IOException {
-307
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-308  @Override
-309  public void call(MasterObserver 
observer) throws IOException {
-310
observer.postListNamespaceDescriptors(this, descriptors);
-311  }
-312});
-313  }
-314
-315  /* Implementation of hooks for invoking 
MasterObservers */
+257  public void preModifyNamespace(final 
NamespaceDescriptor currentNsDescriptor,
+258final NamespaceDescriptor 
newNsDescriptor) throws IOException {
+259
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+260  @Override
+261  public void call(MasterObserver 
observer) throws IOException {
+262observer.preModifyNamespace(this, 
currentNsDescriptor, newNsDescriptor);
+263  }
+264});
+265  }
+266
+267  public void postModifyNamespace(final 
NamespaceDescriptor oldNsDescriptor,
+268final NamespaceDescriptor 
currentNsDescriptor) throws IOException {
+269
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+270  @Override
+271  public void call(MasterObserver 
observer) throws IOException {
+272
observer.postModifyNamespace(this, oldNsDescriptor, currentNsDescriptor);
+273  }
+274});
+275  }
+276
+277  public void 
preGetNamespaceDescriptor(final String namespaceName)
+278  throws IOException {
+279
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+280  @Override
+281  public void call(MasterObserver 
observer) throws IOException {
+282
observer.preGetNamespaceDescriptor(this, namespaceName);
+283  }
+284});
+285  }
+286
+287  public void 
postGetNamespaceDescriptor(final NamespaceDescriptor ns)
+288  throws IOException {
+289
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+290  @Override
+291  public void call(MasterObserver 
observer) throws IOException {
+292
observer.postGetNamespaceDescriptor(this, ns);
+293  }
+294});
+295  }
+296
+297  public void 

[08/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201

[24/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
index 8b2674f..274eb54 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
@@ -262,1413 +262,1417 @@
 254});
 255  }
 256
-257  public void preModifyNamespace(final 
NamespaceDescriptor ns) throws IOException {
-258
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-259  @Override
-260  public void call(MasterObserver 
observer) throws IOException {
-261observer.preModifyNamespace(this, 
ns);
-262  }
-263});
-264  }
-265
-266  public void postModifyNamespace(final 
NamespaceDescriptor ns) throws IOException {
-267
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-268  @Override
-269  public void call(MasterObserver 
observer) throws IOException {
-270
observer.postModifyNamespace(this, ns);
-271  }
-272});
-273  }
-274
-275  public void 
preGetNamespaceDescriptor(final String namespaceName)
-276  throws IOException {
-277
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-278  @Override
-279  public void call(MasterObserver 
observer) throws IOException {
-280
observer.preGetNamespaceDescriptor(this, namespaceName);
-281  }
-282});
-283  }
-284
-285  public void 
postGetNamespaceDescriptor(final NamespaceDescriptor ns)
-286  throws IOException {
-287
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-288  @Override
-289  public void call(MasterObserver 
observer) throws IOException {
-290
observer.postGetNamespaceDescriptor(this, ns);
-291  }
-292});
-293  }
-294
-295  public void 
preListNamespaceDescriptors(final ListNamespaceDescriptor 
descriptors)
-296  throws IOException {
-297
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-298  @Override
-299  public void call(MasterObserver 
observer) throws IOException {
-300
observer.preListNamespaceDescriptors(this, descriptors);
-301  }
-302});
-303  }
-304
-305  public void 
postListNamespaceDescriptors(final ListNamespaceDescriptor 
descriptors)
-306  throws IOException {
-307
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
-308  @Override
-309  public void call(MasterObserver 
observer) throws IOException {
-310
observer.postListNamespaceDescriptors(this, descriptors);
-311  }
-312});
-313  }
-314
-315  /* Implementation of hooks for invoking 
MasterObservers */
+257  public void preModifyNamespace(final 
NamespaceDescriptor currentNsDescriptor,
+258final NamespaceDescriptor 
newNsDescriptor) throws IOException {
+259
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+260  @Override
+261  public void call(MasterObserver 
observer) throws IOException {
+262observer.preModifyNamespace(this, 
currentNsDescriptor, newNsDescriptor);
+263  }
+264});
+265  }
+266
+267  public void postModifyNamespace(final 
NamespaceDescriptor oldNsDescriptor,
+268final NamespaceDescriptor 
currentNsDescriptor) throws IOException {
+269
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+270  @Override
+271  public void call(MasterObserver 
observer) throws IOException {
+272
observer.postModifyNamespace(this, oldNsDescriptor, currentNsDescriptor);
+273  }
+274});
+275  }
+276
+277  public void 
preGetNamespaceDescriptor(final String namespaceName)
+278  throws IOException {
+279
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+280  @Override
+281  public void call(MasterObserver 
observer) throws IOException {
+282
observer.preGetNamespaceDescriptor(this, namespaceName);
+283  }
+284});
+285  }
+286
+287  public void 
postGetNamespaceDescriptor(final NamespaceDescriptor ns)
+288  throws IOException {
+289
execOperation(coprocEnvironments.isEmpty() ? null : new 
MasterObserverOperation() {
+290  @Override
+291  public void call(MasterObserver 
observer) throws IOException {
+292
observer.postGetNamespaceDescriptor(this, ns);
+293  }
+294});
+295  }
+296
+297  public void 
preListNamespaceDescriptors(final ListNamespaceDescriptor 

[06/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201

[19/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
index 6313ac8..381917f 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class PerformanceEvaluation.TestOptions
+static class PerformanceEvaluation.TestOptions
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Wraps up options passed to PerformanceEvaluation.
  This makes tracking all these arguments a little easier.
@@ -695,7 +695,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 cmdName
-https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String cmdName
+https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String cmdName
 
 
 
@@ -704,7 +704,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 nomapred
-boolean nomapred
+boolean nomapred
 
 
 
@@ -713,7 +713,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 filterAll
-boolean filterAll
+boolean filterAll
 
 
 
@@ -722,7 +722,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 startRow
-int startRow
+int startRow
 
 
 
@@ -731,7 +731,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 size
-float size
+float size
 
 
 
@@ -740,7 +740,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 perClientRunRows
-int perClientRunRows
+int perClientRunRows
 
 
 
@@ -749,7 +749,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 numClientThreads
-int numClientThreads
+int numClientThreads
 
 
 
@@ -758,7 +758,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 totalRows
-int totalRows
+int totalRows
 
 
 
@@ -767,7 +767,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 measureAfter
-int measureAfter
+int measureAfter
 
 
 
@@ -776,7 +776,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 sampleRate
-float sampleRate
+float sampleRate
 
 
 
@@ -785,7 +785,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 traceRate
-double traceRate
+double traceRate
 
 
 
@@ -794,7 +794,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 tableName
-https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String tableName
+https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String tableName
 
 
 
@@ -803,7 +803,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 flushCommits
-boolean flushCommits
+boolean flushCommits
 
 
 
@@ -812,7 +812,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 writeToWAL
-boolean writeToWAL
+boolean writeToWAL
 
 
 
@@ -821,7 +821,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 autoFlush
-boolean autoFlush
+boolean autoFlush
 
 
 
@@ -830,7 +830,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 oneCon
-boolean oneCon
+boolean oneCon
 
 
 
@@ -839,7 +839,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 useTags
-boolean useTags
+boolean useTags
 
 
 
@@ -848,7 +848,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 noOfTags
-int noOfTags
+int noOfTags
 
 
 
@@ -857,7 +857,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 reportLatency
-boolean reportLatency
+boolean reportLatency
 
 
 
@@ -866,7 +866,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 multiGet
-int multiGet
+int multiGet
 
 
 
@@ -875,7 +875,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 randomSleep
-int randomSleep
+int randomSleep
 
 
 
@@ -884,7 +884,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 inMemoryCF
-boolean inMemoryCF
+boolean inMemoryCF
 
 
 
@@ -893,7 +893,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 presplitRegions
-int presplitRegions
+int presplitRegions
 
 
 
@@ -902,7 +902,7 @@ extends 

[11/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201

[17/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index e159b3f..3168ee3 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -145,8 +145,8 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.backup.TestBackupDeleteWithFailures.Failure
 org.apache.hadoop.hbase.backup.TestIncrementalBackupMergeWithFailures.FailurePhase
+org.apache.hadoop.hbase.backup.TestBackupDeleteWithFailures.Failure
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.TestMaxRetriesCoprocessor.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.TestMaxRetriesCoprocessor.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.TestMaxRetriesCoprocessor.html
index 2041495..d7f6323 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.TestMaxRetriesCoprocessor.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.TestMaxRetriesCoprocessor.html
@@ -225,7 +225,7 @@ implements 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.had
 
 
 Methods inherited from 
interfaceorg.apache.hadoop.hbase.coprocessor.MasterObserver
-postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, 
postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, 
postCloneSnapshot, postCompletedCreateTableAction, 
postCompletedDeleteTableAction, postCompletedDisableTableAction, 
postCompletedEnableTableAction, postCompletedMergeRegionsAction, 
postCompletedModifyTableAction, postCompletedSplitRegionAction, 
postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, 
postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, 
postDeleteTable, postDisableReplicationPeer, postDisableTable, 
postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, 
postGetLocks, postGetNamespaceDescriptor, postGetProcedures, 
postGetReplicationPeerConfig, postGetTableDescriptors, postGetTableNames, 
postListDecommissionedRegionServers, postListNamespaceDescriptors, 
postListReplicationPeers, postListSnapshot, postLockHeartbeat, 
postMergeRegions, postMergeRegionsCommitAction, postModifyNam
 espace, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, 
postMoveTables, postRecommissionRegionServer, postRegionOffline, 
postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, 
postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, 
postRollBackSplitRegionAction, postSetNamespaceQuota, 
postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, 
postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, 
postTableFlush, postTruncateTable, postUnassign, 
postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, 
preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, 
preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTable, 
preCreateTableAction, preDecommissionRegionServers, preDeleteNamespace, 
preDeleteSnapshot, preDeleteTable, preDeleteTableAction, 
preDisableReplicationPeer, preDisableTable, preDisableTableAction, 
preEnableReplicationPeer, preEnableTable, preEnableTableAction,
  preGetClusterMetrics, preGetLocks, preGetProcedures, 
preGetReplicationPeerConfig, preGetTableDescriptors, preGetTableNames, 
preListDecommissionedRegionServers, preListNamespaceDescriptors, 
preListReplicationPeers, preListSnapshot, preLockHeartbeat, 
preMasterInitialization, preMergeRegions, preMergeRegionsAction, 
preMergeRegionsCommitAction, preModifyNamespace, preModifyTable, 
preModifyTableAction, preMove, preMoveServers, preMoveServersAndTables, 
preMoveTables, preRecommissionRegionServer, preRegionOffline, 
preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, preRequestLock, 
preRestoreSnapshot, preSetNamespaceQuota, preSetSplitOrMergeEnabled, 
preSetTableQuota, preSetUserQuota, preSetUserQuota, preSetUserQuota, 
preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, 
preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction, 

[14/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201
addCommandDescriptor(CheckAndPutTest.class, 

[02/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201

[01/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 74b8701f3 -> 7bcc960d3


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on 

[03/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201

[07/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201
addCommandDescriptor(CheckAndPutTest.class, "checkAndPut",

[05/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201
addCommandDescriptor(CheckAndPutTest.class, 

[12/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomWriteTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomWriteTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomWriteTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomWriteTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomWriteTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation 
extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
 134  static final String RANDOM_READ = 
"randomRead";
-135  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-137  static {
-138
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = 
"TestTable";
-142  public static final String 
FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO 
= Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-145  public static final int 
DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 
26;
-147
-148  private static final int ONE_GB = 1024 
* 1024 * 1000;
-149  private static final int 
DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this 
configurable
-151  private static final int TAG_LENGTH = 
256;
-152  private static final DecimalFormat FMT 
= new DecimalFormat("0.##");
-153  private static final MathContext CXT = 
MathContext.DECIMAL64;
-154  private static final BigDecimal 
MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal 
BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions 
DEFAULT_OPTS = new TestOptions();
-157
-158  private static MapString, 
CmdDescriptor COMMANDS = new TreeMap();
-159  private static final Path PERF_EVAL_DIR 
= new Path("performance_evaluation");
-160
-161  static {
-162
addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163"Run async random read test");
-164
addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165"Run async random write test");
-166
addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167"Run async sequential read 
test");
-168
addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169"Run async sequential write 
test");
-170
addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171"Run async scan test (read every 
row)");
-172
addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173  "Run random read test");
-174
addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175  "Run random seek and scan 100 
test");
-176
addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177  "Run random seek scan with both 
start and stop row (max 10 rows)");
-178
addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179  "Run random seek scan with both 
start and stop row (max 100 rows)");
-180
addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181  "Run random seek scan with both 
start and stop row (max 1000 rows)");
-182
addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183  "Run random seek scan with both 
start and stop row (max 1 rows)");
-184
addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185  "Run random write test");
-186
addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187  "Run sequential read test");
-188
addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189  "Run sequential write test");
-190addCommandDescriptor(ScanTest.class, 
"scan",
-191  "Run scan test (read every 
row)");
-192
addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to 
find a specific row based on it's value " +
-194  "(make sure to use --rows=20)");
-195
addCommandDescriptor(IncrementTest.class, "increment",
-196  "Increment on each row; clients 
overlap on keyspace so some concurrent operations");
-197
addCommandDescriptor(AppendTest.class, "append",
-198  "Append on each row; clients 
overlap on keyspace so some concurrent operations");
-199
addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200  "CheckAndMutate on each row; 
clients overlap on keyspace so some concurrent operations");
-201

hbase git commit: HBASE-20488 Fix PE command help message

2018-05-17 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/branch-2 34c458c12 -> 56bb1fa06


HBASE-20488 Fix PE command help message

checkstyle fix

Signed-off-by: Peter Somogyi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/56bb1fa0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/56bb1fa0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/56bb1fa0

Branch: refs/heads/branch-2
Commit: 56bb1fa06533e4cc86a62dfb6b5c1691f7ac291f
Parents: 34c458c
Author: Xu Cang 
Authored: Wed May 16 10:30:21 2018 -0700
Committer: Peter Somogyi 
Committed: Thu May 17 11:58:23 2018 +0200

--
 .../org/apache/hadoop/hbase/PerformanceEvaluation.java | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/56bb1fa0/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index b37b255..99940d4 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -132,6 +132,7 @@ import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto
 public class PerformanceEvaluation extends Configured implements Tool {
   static final String RANDOM_SEEK_SCAN = "randomSeekScan";
   static final String RANDOM_READ = "randomRead";
+  static final String PE_COMMAND_SHORTNAME = "pe";
   private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
   private static final ObjectMapper MAPPER = new ObjectMapper();
   static {
@@ -2356,11 +2357,11 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
   }
 
   protected void printUsage() {
-printUsage(this.getClass().getName(), null);
+printUsage(PE_COMMAND_SHORTNAME, null);
   }
 
   protected static void printUsage(final String message) {
-printUsage(PerformanceEvaluation.class.getName(), message);
+printUsage(PE_COMMAND_SHORTNAME, message);
   }
 
   protected static void printUsageAndExit(final String message, final int 
exitCode) {
@@ -2368,11 +2369,11 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
 System.exit(exitCode);
   }
 
-  protected static void printUsage(final String className, final String 
message) {
+  protected static void printUsage(final String shortName, final String 
message) {
 if (message != null && message.length() > 0) {
   System.err.println(message);
 }
-System.err.println("Usage: java " + className + " \\");
+System.err.print("Usage: hbase " + shortName);
 System.err.println("   

hbase git commit: HBASE-20488 Fix PE command help message

2018-05-17 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/master 60bdaf784 -> cf529f18a


HBASE-20488 Fix PE command help message

checkstyle fix

Signed-off-by: Peter Somogyi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cf529f18
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cf529f18
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cf529f18

Branch: refs/heads/master
Commit: cf529f18a9959589fa635f78df4840472526ea2c
Parents: 60bdaf7
Author: Xu Cang 
Authored: Wed May 16 10:30:21 2018 -0700
Committer: Peter Somogyi 
Committed: Thu May 17 11:56:04 2018 +0200

--
 .../org/apache/hadoop/hbase/PerformanceEvaluation.java | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cf529f18/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 33267e0..42acb5c 100644
--- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -132,6 +132,7 @@ import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFacto
 public class PerformanceEvaluation extends Configured implements Tool {
   static final String RANDOM_SEEK_SCAN = "randomSeekScan";
   static final String RANDOM_READ = "randomRead";
+  static final String PE_COMMAND_SHORTNAME = "pe";
   private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
   private static final ObjectMapper MAPPER = new ObjectMapper();
   static {
@@ -2356,11 +2357,11 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
   }
 
   protected void printUsage() {
-printUsage(this.getClass().getName(), null);
+printUsage(PE_COMMAND_SHORTNAME, null);
   }
 
   protected static void printUsage(final String message) {
-printUsage(PerformanceEvaluation.class.getName(), message);
+printUsage(PE_COMMAND_SHORTNAME, message);
   }
 
   protected static void printUsageAndExit(final String message, final int 
exitCode) {
@@ -2368,11 +2369,11 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
 System.exit(exitCode);
   }
 
-  protected static void printUsage(final String className, final String 
message) {
+  protected static void printUsage(final String shortName, final String 
message) {
 if (message != null && message.length() > 0) {
   System.err.println(message);
 }
-System.err.println("Usage: java " + className + " \\");
+System.err.print("Usage: hbase " + shortName);
 System.err.println("   

[17/36] hbase git commit: HBASE-19857 Complete the procedure for adding a sync replication peer

2018-05-17 Thread zhangduo
HBASE-19857 Complete the procedure for adding a sync replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f453292c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f453292c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f453292c

Branch: refs/heads/HBASE-19064
Commit: f453292c21f6551f3eec1196c295c5bf78dd6c65
Parents: 6063da7
Author: zhangduo 
Authored: Thu Jan 25 20:09:00 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../hbase/replication/ReplicationPeer.java  |   9 +
 .../hbase/replication/ReplicationPeerImpl.java  |  28 +--
 .../hbase/replication/ReplicationPeers.java |   3 +-
 .../regionserver/PeerActionListener.java|  10 +-
 .../SyncReplicationPeerProvider.java|  35 +++
 .../SynchronousReplicationPeerProvider.java |  35 ---
 .../hbase/wal/SyncReplicationWALProvider.java   | 234 +++
 .../wal/SynchronousReplicationWALProvider.java  | 225 --
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   8 +-
 .../TestReplicationSourceManager.java   |   3 +
 .../wal/TestSyncReplicationWALProvider.java | 153 
 .../TestSynchronousReplicationWALProvider.java  | 153 
 12 files changed, 456 insertions(+), 440 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f453292c/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 2da3cce..0196a9a 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,15 @@ public interface ReplicationPeer {
   PeerState getPeerState();
 
   /**
+   * Returns the sync replication state of the peer by reading local cache.
+   * 
+   * If the peer is not a synchronous replication peer, a {@link 
SyncReplicationState#NONE} will be
+   * returned.
+   * @return the sync replication state
+   */
+  SyncReplicationState getSyncReplicationState();
+
+  /**
* Test whether the peer is enabled.
* @return {@code true} if enabled, otherwise {@code false}.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/f453292c/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index d656466..ff3f662 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -36,6 +36,8 @@ public class ReplicationPeerImpl implements ReplicationPeer {
 
   private volatile PeerState peerState;
 
+  private volatile SyncReplicationState syncReplicationState;
+
   private final List peerConfigListeners;
 
   /**
@@ -45,12 +47,13 @@ public class ReplicationPeerImpl implements ReplicationPeer 
{
* @param id string representation of this peer's identifier
* @param peerConfig configuration for the replication peer
*/
-  public ReplicationPeerImpl(Configuration conf, String id, boolean peerState,
-  ReplicationPeerConfig peerConfig) {
+  public ReplicationPeerImpl(Configuration conf, String id, 
ReplicationPeerConfig peerConfig,
+  boolean peerState, SyncReplicationState syncReplicationState) {
 this.conf = conf;
 this.id = id;
 this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED;
 this.peerConfig = peerConfig;
+this.syncReplicationState = syncReplicationState;
 this.peerConfigListeners = new ArrayList<>();
   }
 
@@ -77,37 +80,26 @@ public class ReplicationPeerImpl implements ReplicationPeer 
{
 return peerState;
   }
 
-  /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
+  @Override
+  public SyncReplicationState getSyncReplicationState() {
+return syncReplicationState;
+  }
+
   @Override
   public ReplicationPeerConfig getPeerConfig() {
 return peerConfig;
   }
 
-  /**
-   * Get the configuration object required to communicate with this peer
-   * @return configuration object
-   */
   @Override
   public Configuration getConfiguration() {
 return conf;
   }
 
-  /**
- 

[32/36] hbase git commit: HBASE-20425 Do not write the cluster id of the current active cluster when writing remote WAL

2018-05-17 Thread zhangduo
HBASE-20425 Do not write the cluster id of the current active cluster when 
writing remote WAL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/728e3b59
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/728e3b59
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/728e3b59

Branch: refs/heads/HBASE-19064
Commit: 728e3b5914f56da014878f536ca3e4661e7887bd
Parents: 6c77f8a
Author: huzheng 
Authored: Mon Apr 23 17:20:55 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:40 2018 +0800

--
 .../replication/TestSyncReplicationActive.java  | 32 
 1 file changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/728e3b59/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
index bff4572..f9020a0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
@@ -17,9 +17,17 @@
  */
 package org.apache.hadoop.hbase.replication;
 
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WAL.Reader;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -49,6 +57,9 @@ public class TestSyncReplicationActive extends 
SyncReplicationTestBase {
 // peer is disabled so no data have been replicated
 verifyNotReplicatedThroughRegion(UTIL2, 0, 100);
 
+// Ensure that there's no cluster id in remote log entries.
+verifyNoClusterIdInRemoteLog(UTIL2, PEER_ID);
+
 UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
   SyncReplicationState.DOWNGRADE_ACTIVE);
 // confirm that peer with state DA will reject replication request.
@@ -72,4 +83,25 @@ public class TestSyncReplicationActive extends 
SyncReplicationTestBase {
 verifyReplicationRequestRejection(UTIL2, true);
 write(UTIL2, 200, 300);
   }
+
+  private void verifyNoClusterIdInRemoteLog(HBaseTestingUtility utility, 
String peerId)
+  throws Exception {
+FileSystem fs2 = utility.getTestFileSystem();
+Path remoteDir =
+new 
Path(utility.getMiniHBaseCluster().getMaster().getMasterFileSystem().getRootDir(),
+"remoteWALs").makeQualified(fs2.getUri(), 
fs2.getWorkingDirectory());
+FileStatus[] files = fs2.listStatus(new Path(remoteDir, peerId));
+Assert.assertTrue(files.length > 0);
+for (FileStatus file : files) {
+  try (Reader reader =
+  WALFactory.createReader(fs2, file.getPath(), 
utility.getConfiguration())) {
+Entry entry = reader.next();
+Assert.assertTrue(entry != null);
+while (entry != null) {
+  Assert.assertEquals(entry.getKey().getClusterIds().size(), 0);
+  entry = reader.next();
+}
+  }
+}
+  }
 }



[06/36] hbase git commit: HBASE-20571 JMXJsonServlet generates invalid JSON if it has NaN in metrics

2018-05-17 Thread zhangduo
HBASE-20571 JMXJsonServlet generates invalid JSON if it has NaN in metrics

- CacheStats won't generate NaN metrics.
- JSONBean class will serialize special floating point values as
  "NaN", "Infinity" or "-Infinity"

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6148b478
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6148b478
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6148b478

Branch: refs/heads/HBASE-19064
Commit: 6148b4785d5fb9b1f8fbe40e5c4293950ec03012
Parents: 8c9825a
Author: Balazs Meszaros 
Authored: Fri May 11 16:30:38 2018 +0200
Committer: Andrew Purtell 
Committed: Wed May 16 12:20:39 2018 -0700

--
 .../org/apache/hadoop/hbase/util/JSONBean.java  |  6 ++-
 .../hadoop/hbase/io/hfile/CacheStats.java   | 40 +---
 2 files changed, 40 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6148b478/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java
index 80ffa27..da89a41 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java
@@ -310,7 +310,11 @@ public class JSONBean {
 jg.writeEndArray();
   } else if(value instanceof Number) {
 Number n = (Number)value;
-jg.writeNumber(n.toString());
+if (Double.isFinite(n.doubleValue())) {
+  jg.writeNumber(n.toString());
+} else {
+  jg.writeString(n.toString());
+}
   } else if(value instanceof Boolean) {
 Boolean b = (Boolean)value;
 jg.writeBoolean(b);

http://git-wip-us.apache.org/repos/asf/hbase/blob/6148b478/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
index 5edd259..c1c92e1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
@@ -388,23 +388,53 @@ public class CacheStats {
   }
 
   public double getHitRatio() {
-return ((double) getHitCount() / (double) getRequestCount());
+double requestCount = getRequestCount();
+
+if (requestCount == 0) {
+  return 0;
+}
+
+return getHitCount() / requestCount;
   }
 
   public double getHitCachingRatio() {
-return ((double) getHitCachingCount() / (double) getRequestCachingCount());
+double requestCachingCount = getRequestCachingCount();
+
+if (requestCachingCount == 0) {
+  return 0;
+}
+
+return getHitCachingCount() / requestCachingCount;
   }
 
   public double getMissRatio() {
-return ((double) getMissCount() / (double) getRequestCount());
+double requestCount = getRequestCount();
+
+if (requestCount == 0) {
+  return 0;
+}
+
+return getMissCount() / requestCount;
   }
 
   public double getMissCachingRatio() {
-return ((double) getMissCachingCount() / (double) 
getRequestCachingCount());
+double requestCachingCount = getRequestCachingCount();
+
+if (requestCachingCount == 0) {
+  return 0;
+}
+
+return getMissCachingCount() / requestCachingCount;
   }
 
   public double evictedPerEviction() {
-return ((double) getEvictedCount() / (double) getEvictionCount());
+double evictionCount = getEvictionCount();
+
+if (evictionCount == 0) {
+  return 0;
+}
+
+return getEvictedCount() / evictionCount;
   }
 
   public long getFailedInserts() {



[23/36] hbase git commit: HBASE-19973 Implement a procedure to replay sync replication wal for standby cluster

2018-05-17 Thread zhangduo
HBASE-19973 Implement a procedure to replay sync replication wal for standby 
cluster


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/564bdf9f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/564bdf9f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/564bdf9f

Branch: refs/heads/HBASE-19064
Commit: 564bdf9f9af13e18ff74fe412ba71638b501d5f3
Parents: 99ad4e6
Author: Guanghao Zhang 
Authored: Fri Mar 2 18:43:25 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:25:57 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  22 +++
 .../apache/hadoop/hbase/executor/EventType.java |   9 +-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   9 +
 .../hadoop/hbase/master/MasterServices.java |   6 +
 .../procedure/PeerProcedureInterface.java   |   3 +-
 .../hbase/master/procedure/PeerQueue.java   |   3 +-
 .../replication/RecoverStandbyProcedure.java| 114 +++
 .../ReplaySyncReplicationWALManager.java| 139 +
 .../ReplaySyncReplicationWALProcedure.java  | 193 +++
 .../hbase/regionserver/HRegionServer.java   |   9 +-
 .../ReplaySyncReplicationWALCallable.java   | 149 ++
 .../SyncReplicationPeerInfoProviderImpl.java|   3 +
 .../org/apache/hadoop/hbase/util/FSUtils.java   |   5 +
 .../hbase/master/MockNoopMasterServices.java|   8 +-
 .../master/TestRecoverStandbyProcedure.java | 186 ++
 16 files changed, 854 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/564bdf9f/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index e8b940e..01e4dae 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -459,3 +459,25 @@ message TransitPeerSyncReplicationStateStateData {
   optional SyncReplicationState fromState = 1;
   required SyncReplicationState toState = 2;
 }
+
+enum RecoverStandbyState {
+  RENAME_SYNC_REPLICATION_WALS_DIR = 1;
+  INIT_WORKERS = 2;
+  DISPATCH_TASKS = 3;
+  REMOVE_SYNC_REPLICATION_WALS_DIR = 4;
+}
+
+message RecoverStandbyStateData {
+  required string peer_id = 1;
+}
+
+message ReplaySyncReplicationWALStateData {
+  required string peer_id = 1;
+  required string wal = 2;
+  optional ServerName target_server = 3;
+}
+
+message ReplaySyncReplicationWALParameter {
+  required string peer_id = 1;
+  required string wal = 2;
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/564bdf9f/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
index 922deb8..ad38d1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
@@ -281,7 +281,14 @@ public enum EventType {
*
* RS_REFRESH_PEER
*/
-  RS_REFRESH_PEER (84, ExecutorType.RS_REFRESH_PEER);
+  RS_REFRESH_PEER(84, ExecutorType.RS_REFRESH_PEER),
+
+  /**
+   * RS replay sync replication wal.
+   *
+   * RS_REPLAY_SYNC_REPLICATION_WAL
+   */
+  RS_REPLAY_SYNC_REPLICATION_WAL(85, 
ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL);
 
   private final int code;
   private final ExecutorType executor;

http://git-wip-us.apache.org/repos/asf/hbase/blob/564bdf9f/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
index 7f130d1..ea97354 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
@@ -47,7 +47,8 @@ public enum ExecutorType {
   RS_REGION_REPLICA_FLUSH_OPS  (28),
   RS_COMPACTED_FILES_DISCHARGER (29),
   RS_OPEN_PRIORITY_REGION(30),
-  RS_REFRESH_PEER   (31);
+  RS_REFRESH_PEER(31),
+  RS_REPLAY_SYNC_REPLICATION_WAL(32);
 
   ExecutorType(int value) {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/564bdf9f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java

[01/36] hbase git commit: HBASE-20520 Failed effort upping default HDFS blocksize, hbase.regionserver.hlog.blocksize [Forced Update!]

2018-05-17 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19064 f60a710ef -> bcee6f8fc (forced update)


HBASE-20520 Failed effort upping default HDFS blocksize, 
hbase.regionserver.hlog.blocksize


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/060b8aca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/060b8aca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/060b8aca

Branch: refs/heads/HBASE-19064
Commit: 060b8aca8601d323c1a623defdac9c04b704c136
Parents: ab53329
Author: Michael Stack 
Authored: Wed May 2 11:30:03 2018 -0700
Committer: Michael Stack 
Committed: Wed May 16 09:19:24 2018 -0700

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |  12 ++-
 .../wal/AbstractProtobufLogWriter.java  |   8 +-
 .../hbase/regionserver/wal/AsyncFSWAL.java  |   4 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   2 +-
 .../hadoop/hbase/regionserver/wal/WALUtil.java  |  16 +++
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  21 +++-
 .../apache/hadoop/hbase/wal/FSHLogProvider.java |  21 +++-
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  11 +-
 .../regionserver/wal/AbstractTestWALReplay.java |   2 +-
 .../regionserver/wal/TestWALConfiguration.java  | 100 +++
 .../apache/hadoop/hbase/wal/IOTestProvider.java |   9 +-
 src/main/asciidoc/_chapters/upgrading.adoc  |   2 +-
 12 files changed, 175 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/060b8aca/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index ce8dafa..825ad17 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -178,6 +178,11 @@ public abstract class AbstractFSWAL 
implements WAL {
   // If > than this size, roll the log.
   protected final long logrollsize;
 
+  /**
+   * Block size to use writing files.
+   */
+  protected final long blocksize;
+
   /*
* If more than this many logs, force flush of oldest region to oldest edit 
goes to disk. If too
* many and we crash, then will take forever replaying. Keep the number of 
logs tidy.
@@ -405,10 +410,9 @@ public abstract class AbstractFSWAL 
implements WAL {
 // size as those made in hbase-1 (to prevent surprise), we now have 
default block size as
 // 2 times the DFS default: i.e. 2 * DFS default block size rolling at 50% 
full will generally
 // make similar size logs to 1 * DFS default block size rolling at 95% 
full. See HBASE-19148.
-final long blocksize = 
this.conf.getLong("hbase.regionserver.hlog.blocksize",
-  CommonFSUtils.getDefaultBlockSize(this.fs, this.walDir) * 2);
-this.logrollsize =
-  (long) (blocksize * 
conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
+this.blocksize = WALUtil.getWALBlockSize(this.conf, this.fs, this.walDir);
+float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 
0.5f);
+this.logrollsize = (long)(this.blocksize * multiplier);
 
 boolean maxLogsDefined = conf.get("hbase.regionserver.maxlogs") != null;
 if (maxLogsDefined) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/060b8aca/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
index 475b890..50ac101 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
@@ -153,18 +153,16 @@ public abstract class AbstractProtobufLogWriter {
 return doCompress;
   }
 
-  public void init(FileSystem fs, Path path, Configuration conf, boolean 
overwritable)
-  throws IOException, StreamLacksCapabilityException {
+  public void init(FileSystem fs, Path path, Configuration conf, boolean 
overwritable,
+  long blocksize) throws IOException, StreamLacksCapabilityException {
 this.conf = conf;
 boolean doCompress = initializeCompressionContext(conf, path);
 this.trailerWarnSize = conf.getInt(WAL_TRAILER_WARN_SIZE, 
DEFAULT_WAL_TRAILER_WARN_SIZE);
 int bufferSize = 

[07/36] hbase git commit: HBASE-20564 Tighter ByteBufferKeyValue Cell Comparator; ADDENDUM2 Add a Test

2018-05-17 Thread zhangduo
HBASE-20564 Tighter ByteBufferKeyValue Cell Comparator; ADDENDUM2 Add a Test

Run meta tests but using our new basis, the ByteBufferKeyValue
instead of the old byte array-backed KeyValue so we catch any
oddness in the Type processing or in the Comparator.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f4006b50
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f4006b50
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f4006b50

Branch: refs/heads/HBASE-19064
Commit: f4006b5039170ca1ac8adb63abbc84e2e9ee
Parents: 6148b47
Author: Michael Stack 
Authored: Wed May 16 11:01:21 2018 -0700
Committer: Michael Stack 
Committed: Wed May 16 12:56:08 2018 -0700

--
 .../apache/hadoop/hbase/TestCellComparator.java | 119 +++
 1 file changed, 119 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4006b50/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
--
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
index 8652d82..a318515 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
@@ -21,6 +21,10 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.Set;
+import java.util.TreeSet;
+
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -128,4 +132,119 @@ public class TestCellComparator {
 assertEquals(0, comparator.compareRows(bbCell2, bbCell3));
 assertTrue(comparator.compareRows(bbCell1, bbCell2) < 0);
   }
+
+  /**
+   * Test meta comparisons using our new ByteBufferKeyValue Cell type, the 
type we use everywhere
+   * in 2.0.
+   */
+  @Test
+  public void testMetaComparisons() throws Exception {
+long now = System.currentTimeMillis();
+
+// Meta compares
+Cell aaa = createByteBufferKeyValueFromKeyValue(new KeyValue(
+Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), 
now));
+Cell bbb = createByteBufferKeyValueFromKeyValue(new KeyValue(
+Bytes.toBytes("TestScanMultipleVersions,,99"), now));
+CellComparator c = CellComparatorImpl.META_COMPARATOR;
+assertTrue(c.compare(bbb, aaa) < 0);
+
+Cell ccc = createByteBufferKeyValueFromKeyValue(
+new KeyValue(Bytes.toBytes("TestScanMultipleVersions,,1236023996656"),
+Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236024396271L,
+(byte[])null));
+assertTrue(c.compare(ccc, bbb) < 0);
+
+Cell x = createByteBufferKeyValueFromKeyValue(
+new 
KeyValue(Bytes.toBytes("TestScanMultipleVersions,row_0500,1236034574162"),
+Bytes.toBytes("info"), Bytes.toBytes(""), 9223372036854775807L,
+(byte[])null));
+Cell y = createByteBufferKeyValueFromKeyValue(
+new 
KeyValue(Bytes.toBytes("TestScanMultipleVersions,row_0500,1236034574162"),
+Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236034574912L,
+(byte[])null));
+assertTrue(c.compare(x, y) < 0);
+  }
+
+  private static Cell createByteBufferKeyValueFromKeyValue(KeyValue kv) {
+ByteBuffer bb = ByteBuffer.wrap(kv.getBuffer());
+return new ByteBufferKeyValue(bb, 0, bb.remaining());
+  }
+
+  /**
+   * More tests using ByteBufferKeyValue copied over from TestKeyValue which 
uses old KVs only.
+   */
+  @Test
+  public void testMetaComparisons2() {
+long now = System.currentTimeMillis();
+CellComparator c = CellComparatorImpl.META_COMPARATOR;
+assertTrue(c.compare(createByteBufferKeyValueFromKeyValue(new KeyValue(
+
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)),
+createByteBufferKeyValueFromKeyValue(new KeyValue(
+
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now))) == 
0);
+Cell a = createByteBufferKeyValueFromKeyValue(new KeyValue(
+Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), 
now));
+Cell b = createByteBufferKeyValueFromKeyValue(new KeyValue(
+Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,2"), 
now));
+assertTrue(c.compare(a, b) < 0);
+assertTrue(c.compare(createByteBufferKeyValueFromKeyValue(new KeyValue(
+
Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,2"), now)),
+createByteBufferKeyValueFromKeyValue(new 

[28/36] hbase git commit: HBASE-20456 Support removing a ReplicationSourceShipper for a special wal group

2018-05-17 Thread zhangduo
HBASE-20456 Support removing a ReplicationSourceShipper for a special wal group


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dea04943
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dea04943
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dea04943

Branch: refs/heads/HBASE-19064
Commit: dea04943440a8af428035c1cd20a66345ae46a88
Parents: 728e3b5
Author: zhangduo 
Authored: Tue Apr 24 22:01:21 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:40 2018 +0800

--
 .../hbase/regionserver/wal/AsyncFSWAL.java  |  1 +
 .../RecoveredReplicationSource.java | 13 +---
 .../RecoveredReplicationSourceShipper.java  |  7 --
 .../regionserver/ReplicationSource.java | 13 +++-
 .../regionserver/ReplicationSourceManager.java  | 19 -
 .../regionserver/ReplicationSourceShipper.java  | 20 +++--
 .../ReplicationSourceWALReader.java |  9 ++-
 .../regionserver/WALEntryStream.java|  3 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java | 28 ---
 .../hbase/wal/SyncReplicationWALProvider.java   | 10 ++-
 .../TestReplicationSourceManager.java   |  5 +-
 .../TestSyncReplicationShipperQuit.java | 81 
 .../regionserver/TestWALEntryStream.java|  4 +-
 13 files changed, 163 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dea04943/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index d98ab75..9b4ce9c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -682,6 +682,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
   protected void doShutdown() throws IOException {
 waitForSafePoint();
 closeWriter(this.writer);
+this.writer = null;
 closeExecutor.shutdown();
 try {
   if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, 
TimeUnit.SECONDS)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/dea04943/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
index a21ca44..f1bb538 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -144,15 +143,9 @@ public class RecoveredReplicationSource extends 
ReplicationSource {
   }
 
   void tryFinish() {
-// use synchronize to make sure one last thread will clean the queue
-synchronized (workerThreads) {
-  Threads.sleep(100);// wait a short while for other worker thread to 
fully exit
-  boolean allTasksDone = workerThreads.values().stream().allMatch(w -> 
w.isFinished());
-  if (allTasksDone) {
-this.getSourceMetrics().clear();
-manager.removeRecoveredSource(this);
-LOG.info("Finished recovering queue {} with the following stats: {}", 
queueId, getStats());
-  }
+if (workerThreads.isEmpty()) {
+  this.getSourceMetrics().clear();
+  manager.finishRecoveredSource(this);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/dea04943/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
index 91109cf..b0d4db0 100644
--- 

[14/36] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication

2018-05-17 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/6063da73/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index 8911982..f5eca39 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -28,6 +28,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
@@ -67,9 +68,9 @@ public class TestHBaseFsckReplication {
 String peerId1 = "1";
 String peerId2 = "2";
 peerStorage.addPeer(peerId1, 
ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-  true);
+  true, SyncReplicationState.NONE);
 peerStorage.addPeer(peerId2, 
ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-  true);
+  true, SyncReplicationState.NONE);
 for (int i = 0; i < 10; i++) {
   queueStorage.addWAL(ServerName.valueOf("localhost", 1 + i, 10 + 
i), peerId1,
 "file-" + i);

http://git-wip-us.apache.org/repos/asf/hbase/blob/6063da73/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb 
b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index d1f1344..5f86365 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -20,6 +20,7 @@
 include Java
 
 java_import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil
+java_import org.apache.hadoop.hbase.replication.SyncReplicationState
 java_import org.apache.hadoop.hbase.replication.ReplicationPeerConfig
 java_import org.apache.hadoop.hbase.util.Bytes
 java_import org.apache.hadoop.hbase.zookeeper.ZKConfig
@@ -338,6 +339,20 @@ module Hbase
   '!' + ReplicationPeerConfigUtil.convertToString(tableCFs)
 end
 
+# Transit current cluster to a new state in the specified synchronous
+# replication peer
+def transit_peer_sync_replication_state(id, state)
+  if 'ACTIVE'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, 
SyncReplicationState::ACTIVE)
+  elsif 'DOWNGRADE_ACTIVE'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, 
SyncReplicationState::DOWNGRADE_ACTIVE)
+  elsif 'STANDBY'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, 
SyncReplicationState::STANDBY)
+  else
+raise(ArgumentError, 'synchronous replication state must be ACTIVE, 
DOWNGRADE_ACTIVE or STANDBY')
+  end
+end
+
 
#--
 # Enables a table's replication switch
 def enable_tablerep(table_name)

http://git-wip-us.apache.org/repos/asf/hbase/blob/6063da73/hbase-shell/src/main/ruby/shell.rb
--
diff --git a/hbase-shell/src/main/ruby/shell.rb 
b/hbase-shell/src/main/ruby/shell.rb
index 9a79658..934fa11 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -393,6 +393,7 @@ Shell.load_command_group(
 get_peer_config
 list_peer_configs
 update_peer_config
+transit_peer_sync_replication_state
   ]
 )
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6063da73/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb 
b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index f3ab749..f2ec014 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -39,8 +39,8 @@ EOF
 peers = replication_admin.list_peers
 
 formatter.header(%w[PEER_ID CLUSTER_KEY ENDPOINT_CLASSNAME
-REMOTE_ROOT_DIR STATE REPLICATE_ALL 
-NAMESPACES TABLE_CFS BANDWIDTH
+REMOTE_ROOT_DIR SYNC_REPLICATION_STATE STATE
+REPLICATE_ALL NAMESPACES TABLE_CFS BANDWIDTH

[04/36] hbase git commit: HBASE-20593 HBase website landing page should link to HBaseCon Asia 2018

2018-05-17 Thread zhangduo
HBASE-20593 HBase website landing page should link to HBaseCon Asia 2018

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2c32272d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2c32272d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2c32272d

Branch: refs/heads/HBASE-19064
Commit: 2c32272dfa40dbf574343901c2ddea9319ca0bd5
Parents: 6d656b7
Author: Sean Busbey 
Authored: Wed May 16 11:13:01 2018 -0500
Committer: Sean Busbey 
Committed: Wed May 16 13:29:15 2018 -0500

--
 src/site/xdoc/index.xml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2c32272d/src/site/xdoc/index.xml
--
diff --git a/src/site/xdoc/index.xml b/src/site/xdoc/index.xml
index d87ec55..0b54cc0 100644
--- a/src/site/xdoc/index.xml
+++ b/src/site/xdoc/index.xml
@@ -83,7 +83,8 @@ Apache HBase is an open-source, distributed, versioned, 
non-relational database
 
 
  
-   June 18th, 2018 https://hbase.apache.org/hbasecon-2018;>HBaseCon 2018 @ San Jose 
Convention Center, San Jose, CA, USA. CFP open, see site for details!
+   August 17th, 2018 https://hbase.apache.org/hbaseconasia-2018/;>HBaseCon Asia 2018 @ 
Gehua New Century Hotel, Beijing, China. CFP open, see site for details!
+   June 18th, 2018 https://hbase.apache.org/hbasecon-2018;>HBaseCon North America West 
2018 @ San Jose Convention Center, San Jose, CA, USA. registration still 
open, see site for details!
August 4th, 2017 https://easychair.org/cfp/HBaseConAsia2017;>HBaseCon Asia 2017 @ the 
Huawei Campus in Shenzhen, China
June 12th, 2017 https://easychair.org/cfp/hbasecon2017;>HBaseCon2017 at the 
Crittenden Buildings on the Google Mountain View Campus
April 25th, 2017 https://www.meetup.com/hbaseusergroup/events/239291716/;>Meetup @ 
Visa in Palo Alto



[31/36] hbase git commit: HBASE-20458 Support removing a WAL from LogRoller

2018-05-17 Thread zhangduo
HBASE-20458 Support removing a WAL from LogRoller


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b50945e3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b50945e3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b50945e3

Branch: refs/heads/HBASE-19064
Commit: b50945e3c124437e528641256e596990d221ce07
Parents: 386e40d
Author: Guanghao Zhang 
Authored: Mon Apr 23 16:31:54 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:40 2018 +0800

--
 .../hadoop/hbase/regionserver/LogRoller.java| 29 +--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |  7 +-
 .../regionserver/wal/WALClosedException.java| 47 ++
 .../hbase/regionserver/TestLogRoller.java   | 90 
 .../regionserver/wal/AbstractTestFSWAL.java |  9 ++
 5 files changed, 171 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b50945e3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
index 55c5219..ab0083f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -30,6 +32,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.hadoop.hbase.regionserver.wal.WALClosedException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -177,17 +180,24 @@ public class LogRoller extends HasThread implements 
Closeable {
   rollLock.lock(); // FindBugs UL_UNRELEASED_LOCK_EXCEPTION_PATH
   try {
 this.lastrolltime = now;
-for (Entry entry : walNeedsRoll.entrySet()) {
+for (Iterator> iter = 
walNeedsRoll.entrySet().iterator(); iter
+.hasNext();) {
+  Entry entry = iter.next();
   final WAL wal = entry.getKey();
   // Force the roll if the logroll.period is elapsed or if a roll was 
requested.
   // The returned value is an array of actual region names.
-  final byte [][] regionsToFlush = wal.rollWriter(periodic ||
-  entry.getValue().booleanValue());
-  walNeedsRoll.put(wal, Boolean.FALSE);
-  if (regionsToFlush != null) {
-for (byte[] r : regionsToFlush) {
-  scheduleFlush(r);
+  try {
+final byte[][] regionsToFlush =
+wal.rollWriter(periodic || entry.getValue().booleanValue());
+walNeedsRoll.put(wal, Boolean.FALSE);
+if (regionsToFlush != null) {
+  for (byte[] r : regionsToFlush) {
+scheduleFlush(r);
+  }
 }
+  } catch (WALClosedException e) {
+LOG.warn("WAL has been closed. Skipping rolling of writer and just 
remove it", e);
+iter.remove();
   }
 }
   } catch (FailedLogCloseException e) {
@@ -252,4 +262,9 @@ public class LogRoller extends HasThread implements 
Closeable {
 running = false;
 interrupt();
   }
+
+  @VisibleForTesting
+  Map getWalNeedsRoll() {
+return this.walNeedsRoll;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b50945e3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 4255086..72ad8b8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -754,15 +754,14 @@ public abstract class AbstractFSWAL 
implements WAL {
   public byte[][] rollWriter(boolean force) throws FailedLogCloseException, 
IOException 

[27/36] hbase git commit: HBASE-20163 Forbid major compaction when standby cluster replay the remote wals

2018-05-17 Thread zhangduo
HBASE-20163 Forbid major compaction when standby cluster replay the remote wals


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/02b22dfc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/02b22dfc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/02b22dfc

Branch: refs/heads/HBASE-19064
Commit: 02b22dfc5bf777823d3a6358321724c640a0df9d
Parents: 051197f
Author: Guanghao Zhang 
Authored: Thu Apr 12 14:44:25 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:39 2018 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 18 
 .../hbase/regionserver/HRegionServer.java   |  2 +-
 .../regionserver/RegionServerServices.java  |  5 +++
 .../ForbidMajorCompactionChecker.java   | 44 
 .../hadoop/hbase/MockRegionServerServices.java  |  6 +++
 .../hadoop/hbase/master/MockRegionServer.java   |  6 +++
 6 files changed, 80 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/02b22dfc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d86565e..6aa4b27 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -144,6 +144,7 @@ import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import 
org.apache.hadoop.hbase.regionserver.compactions.ForbidMajorCompactionChecker;
 import 
org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
 import 
org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;
@@ -1980,6 +1981,14 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 return compact(compaction, store, throughputController, null);
   }
 
+  private boolean shouldForbidMajorCompaction() {
+if (rsServices != null && rsServices.getReplicationSourceService() != 
null) {
+  return 
rsServices.getReplicationSourceService().getSyncReplicationPeerInfoProvider()
+  .checkState(getRegionInfo(), ForbidMajorCompactionChecker.get());
+}
+return false;
+  }
+
   public boolean compact(CompactionContext compaction, HStore store,
   ThroughputController throughputController, User user) throws IOException 
{
 assert compaction != null && compaction.hasSelection();
@@ -1989,6 +1998,15 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   store.cancelRequestedCompaction(compaction);
   return false;
 }
+
+if (compaction.getRequest().isAllFiles() && shouldForbidMajorCompaction()) 
{
+  LOG.warn("Skipping major compaction on " + this
+  + " because this cluster is transiting sync replication state"
+  + " from STANDBY to DOWNGRADE_ACTIVE");
+  store.cancelRequestedCompaction(compaction);
+  return false;
+}
+
 MonitoredTask status = null;
 boolean requestNeedsCancellation = true;
 /*

http://git-wip-us.apache.org/repos/asf/hbase/blob/02b22dfc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index af2f3b5..440a838 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2472,7 +2472,7 @@ public class HRegionServer extends HasThread implements
* @return Return the object that implements the replication
* source executorService.
*/
-  @VisibleForTesting
+  @Override
   public ReplicationSourceService getReplicationSourceService() {
 return replicationSourceHandler;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/02b22dfc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
--
diff --git 

[29/36] hbase git commit: HBASE-20426 Give up replicating anything in S state

2018-05-17 Thread zhangduo
HBASE-20426 Give up replicating anything in S state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f112664b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f112664b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f112664b

Branch: refs/heads/HBASE-19064
Commit: f112664b13718e8b302c27af27d115f3375348a7
Parents: 5f3cc3c
Author: zhangduo 
Authored: Thu May 3 15:51:35 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:40 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  13 +-
 .../replication/AbstractPeerProcedure.java  |   4 +
 .../master/replication/ModifyPeerProcedure.java |   6 -
 .../replication/ReplicationPeerManager.java |  13 +-
 ...ransitPeerSyncReplicationStateProcedure.java |  94 +++
 .../hadoop/hbase/regionserver/LogRoller.java|  11 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  63 --
 .../regionserver/ReplicationSource.java |   1 +
 .../regionserver/ReplicationSourceManager.java  | 118 ---
 .../TestDrainReplicationQueuesForStandBy.java   | 118 +++
 10 files changed, 379 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f112664b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 01e4dae..f15cb04 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -394,11 +394,14 @@ enum PeerSyncReplicationStateTransitionState {
   SET_PEER_NEW_SYNC_REPLICATION_STATE = 2;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN = 3;
   REPLAY_REMOTE_WAL_IN_PEER = 4;
-  REOPEN_ALL_REGIONS_IN_PEER = 5;
-  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 6;
-  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 7;
-  CREATE_DIR_FOR_REMOTE_WAL = 8;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 9;
+  REMOVE_ALL_REPLICATION_QUEUES_IN_PEER = 5;
+  REOPEN_ALL_REGIONS_IN_PEER = 6;
+  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 7;
+  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 8;
+  SYNC_REPLICATION_SET_PEER_ENABLED = 9;
+  SYNC_REPLICATION_ENABLE_PEER_REFRESH_PEER_ON_RS = 10;
+  CREATE_DIR_FOR_REMOTE_WAL = 11;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 12;
 }
 
 message PeerModificationStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f112664b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
index 6679d78..458e073 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -106,4 +106,8 @@ public abstract class AbstractPeerProcedure
 throw new UnsupportedOperationException();
   }
 
+  protected final void refreshPeer(MasterProcedureEnv env, PeerOperationType 
type) {
+
addChildProcedure(env.getMasterServices().getServerManager().getOnlineServersList().stream()
+  .map(sn -> new RefreshPeerProcedure(peerId, type, 
sn)).toArray(RefreshPeerProcedure[]::new));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f112664b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index 32b8ea1..56462ca 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -108,12 +108,6 @@ public abstract class ModifyPeerProcedure extends 
AbstractPeerProcedure new RefreshPeerProcedure(peerId, type, sn))
-  .toArray(RefreshPeerProcedure[]::new));
-  }
-
   protected ReplicationPeerConfig getOldPeerConfig() {
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f112664b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java

[15/36] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication

2018-05-17 Thread zhangduo
HBASE-19781 Add a new cluster state flag for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6063da73
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6063da73
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6063da73

Branch: refs/heads/HBASE-19064
Commit: 6063da732ef426d80cebaf852ef4ab18fb82ac83
Parents: 61d9436
Author: Guanghao Zhang 
Authored: Mon Jan 22 11:44:49 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  39 +
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  31 
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|   7 +
 .../hbase/client/ConnectionImplementation.java  |   9 ++
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  26 +++
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  15 ++
 .../client/ShortCircuitMasterConnection.java|   9 ++
 .../replication/ReplicationPeerConfigUtil.java  |  26 +--
 .../replication/ReplicationPeerDescription.java |  10 +-
 .../hbase/replication/SyncReplicationState.java |  48 ++
 .../hbase/shaded/protobuf/RequestConverter.java |  10 ++
 .../src/main/protobuf/Master.proto  |   4 +
 .../src/main/protobuf/MasterProcedure.proto |   4 +
 .../src/main/protobuf/Replication.proto |  20 +++
 .../replication/ReplicationPeerStorage.java |  18 ++-
 .../hbase/replication/ReplicationUtils.java |   1 +
 .../replication/ZKReplicationPeerStorage.java   |  61 +--
 .../replication/TestReplicationStateBasic.java  |  23 ++-
 .../TestZKReplicationPeerStorage.java   |  12 +-
 .../hbase/coprocessor/MasterObserver.java   |  23 +++
 .../org/apache/hadoop/hbase/master/HMaster.java |  12 ++
 .../hbase/master/MasterCoprocessorHost.java |  21 +++
 .../hadoop/hbase/master/MasterRpcServices.java  |  17 ++
 .../hadoop/hbase/master/MasterServices.java |   9 ++
 .../procedure/PeerProcedureInterface.java   |   2 +-
 .../replication/ReplicationPeerManager.java |  51 +-
 ...ransitPeerSyncReplicationStateProcedure.java | 159 +++
 .../hbase/security/access/AccessController.java |   8 +
 .../replication/TestReplicationAdmin.java   |  62 
 .../hbase/master/MockNoopMasterServices.java|   8 +-
 .../cleaner/TestReplicationHFileCleaner.java|   4 +-
 .../TestReplicationTrackerZKImpl.java   |   6 +-
 .../TestReplicationSourceManager.java   |   3 +-
 .../security/access/TestAccessController.java   |  16 ++
 .../hbase/util/TestHBaseFsckReplication.java|   5 +-
 .../src/main/ruby/hbase/replication_admin.rb|  15 ++
 hbase-shell/src/main/ruby/shell.rb  |   1 +
 .../src/main/ruby/shell/commands/list_peers.rb  |   6 +-
 .../transit_peer_sync_replication_state.rb  |  44 +
 .../test/ruby/hbase/replication_admin_test.rb   |  24 +++
 40 files changed, 816 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6063da73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 331f2d1..39542e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -51,6 +51,7 @@ import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -2657,6 +2658,44 @@ public interface Admin extends Abortable, Closeable {
   List listReplicationPeers(Pattern pattern) 
throws IOException;
 
   /**
+   * Transit current cluster to a new state in a synchronous replication peer.
+   * @param peerId a short name that identifies the peer
+   * @param state a new state of current cluster
+   * @throws IOException if a remote or network exception occurs
+   */
+  void transitReplicationPeerSyncReplicationState(String peerId, 
SyncReplicationState state)
+  throws IOException;
+
+  /**
+   * Transit current cluster to a new state in a synchronous replication peer. 
But does not block
+   * and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the 

[10/36] hbase git commit: HBASE-19943 Only allow removing sync replication peer which is in DA state

2018-05-17 Thread zhangduo
HBASE-19943 Only allow removing sync replication peer which is in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/99ad4e6b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/99ad4e6b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/99ad4e6b

Branch: refs/heads/HBASE-19064
Commit: 99ad4e6b869ab5f0060d20aa0c71b24d90dacdfd
Parents: bb87b0f
Author: huzheng 
Authored: Thu Mar 1 18:34:02 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 14 -
 .../hbase/wal/SyncReplicationWALProvider.java   |  2 +-
 .../replication/TestReplicationAdmin.java   | 63 
 .../hbase/replication/TestSyncReplication.java  |  2 +-
 4 files changed, 78 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/99ad4e6b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 0dc922d..41dd6e3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -120,8 +120,20 @@ public class ReplicationPeerManager {
 return desc;
   }
 
+  private void checkPeerInDAStateIfSyncReplication(String peerId) throws 
DoNotRetryIOException {
+ReplicationPeerDescription desc = peers.get(peerId);
+if (desc != null && desc.getPeerConfig().isSyncReplication()
+&& 
!SyncReplicationState.DOWNGRADE_ACTIVE.equals(desc.getSyncReplicationState())) {
+  throw new DoNotRetryIOException("Couldn't remove synchronous replication 
peer with state="
+  + desc.getSyncReplicationState()
+  + ", Transit the synchronous replication state to be 
DOWNGRADE_ACTIVE firstly.");
+}
+  }
+
   ReplicationPeerConfig preRemovePeer(String peerId) throws 
DoNotRetryIOException {
-return checkPeerExists(peerId).getPeerConfig();
+ReplicationPeerDescription pd = checkPeerExists(peerId);
+checkPeerInDAStateIfSyncReplication(peerId);
+return pd.getPeerConfig();
   }
 
   void preEnablePeer(String peerId) throws DoNotRetryIOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/99ad4e6b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index ac4b4cd..282aa21 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
@@ -142,7 +142,7 @@ public class SyncReplicationWALProvider implements 
WALProvider, PeerActionListen
   @Override
   public WAL getWAL(RegionInfo region) throws IOException {
 if (region == null) {
-  return provider.getWAL(region);
+  return provider.getWAL(null);
 }
 Optional> peerIdAndRemoteWALDir =
   peerInfoProvider.getPeerIdAndRemoteWALDir(region);

http://git-wip-us.apache.org/repos/asf/hbase/blob/99ad4e6b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 0ad476f..486ab51 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -254,6 +254,62 @@ public class TestReplicationAdmin {
   }
 
   @Test
+  public void testRemovePeerWithNonDAState() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TEST_UTIL.createTable(tableName, Bytes.toBytes("family"));
+ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
+
+String rootDir = "hdfs://srv1:/hbase";
+builder.setClusterKey(KEY_ONE);
+builder.setRemoteWALDir(rootDir);
+builder.setReplicateAllUserTables(false);
+Map tableCfs = new 

[20/36] hbase git commit: HBASE-19935 Only allow table replication for sync replication for now

2018-05-17 Thread zhangduo
HBASE-19935 Only allow table replication for sync replication for now


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5d20df78
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5d20df78
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5d20df78

Branch: refs/heads/HBASE-19064
Commit: 5d20df783adf3a22abe5c7116ce04169077e1465
Parents: a28fbbf
Author: Guanghao Zhang 
Authored: Tue Feb 6 16:00:59 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |  9 +++
 .../replication/ReplicationPeerManager.java | 34 -
 .../replication/TestReplicationAdmin.java   | 73 ++--
 .../wal/TestCombinedAsyncWriter.java|  6 ++
 .../wal/TestSyncReplicationWALProvider.java |  6 ++
 5 files changed, 102 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5d20df78/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 97abc74..997a155 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -25,6 +25,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
+
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -220,6 +222,13 @@ public class ReplicationPeerConfig {
 return this.remoteWALDir;
   }
 
+  /**
+   * Use remote wal dir to decide whether a peer is sync replication peer
+   */
+  public boolean isSyncReplication() {
+return !StringUtils.isBlank(this.remoteWALDir);
+  }
+
   public static ReplicationPeerConfigBuilder newBuilder() {
 return new ReplicationPeerConfigBuilderImpl();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/5d20df78/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index f07a0d8..ff778a8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -170,7 +170,7 @@ public class ReplicationPeerManager {
   " does not match new remote wal dir '" + 
peerConfig.getRemoteWALDir() + "'");
 }
 
-if (oldPeerConfig.getRemoteWALDir() != null) {
+if (oldPeerConfig.isSyncReplication()) {
   if (!ReplicationUtils.isNamespacesAndTableCFsEqual(oldPeerConfig, 
peerConfig)) {
 throw new DoNotRetryIOException(
   "Changing the replicated namespace/table config on a synchronous 
replication " +
@@ -199,8 +199,8 @@ public class ReplicationPeerManager {
 }
 ReplicationPeerConfig copiedPeerConfig = 
ReplicationPeerConfig.newBuilder(peerConfig).build();
 SyncReplicationState syncReplicationState =
-StringUtils.isBlank(peerConfig.getRemoteWALDir()) ? 
SyncReplicationState.NONE
-: SyncReplicationState.DOWNGRADE_ACTIVE;
+copiedPeerConfig.isSyncReplication() ? 
SyncReplicationState.DOWNGRADE_ACTIVE
+: SyncReplicationState.NONE;
 peerStorage.addPeer(peerId, copiedPeerConfig, enabled, 
syncReplicationState);
 peers.put(peerId,
   new ReplicationPeerDescription(peerId, enabled, copiedPeerConfig, 
syncReplicationState));
@@ -324,9 +324,37 @@ public class ReplicationPeerManager {
 peerConfig.getTableCFsMap());
 }
 
+if (peerConfig.isSyncReplication()) {
+  checkPeerConfigForSyncReplication(peerConfig);
+}
+
 checkConfiguredWALEntryFilters(peerConfig);
   }
 
+  private void checkPeerConfigForSyncReplication(ReplicationPeerConfig 
peerConfig)
+  throws DoNotRetryIOException {
+// This is used to reduce the difficulty for implementing the sync 
replication state transition
+// as we need to reopen all the related regions.
+// TODO: Add namespace, replicat_all flag back
+if (peerConfig.replicateAllUserTables()) {
+  throw new DoNotRetryIOException(
+   

[33/36] hbase git commit: HBASE-20432 Cleanup related resources when remove a sync replication peer

2018-05-17 Thread zhangduo
HBASE-20432 Cleanup related resources when remove a sync replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5f3cc3c1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5f3cc3c1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5f3cc3c1

Branch: refs/heads/HBASE-19064
Commit: 5f3cc3c11ab483f691562d18648e2bc3e5a3895a
Parents: b50945e
Author: huzheng 
Authored: Wed Apr 18 20:38:33 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:40 2018 +0800

--
 .../master/replication/RemovePeerProcedure.java | 10 +
 .../ReplaySyncReplicationWALManager.java|  8 
 .../replication/SyncReplicationTestBase.java| 45 +---
 .../replication/TestSyncReplicationActive.java  |  9 ++--
 .../replication/TestSyncReplicationStandBy.java | 31 --
 5 files changed, 89 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5f3cc3c1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
index 82dc07e..7335fe0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
@@ -66,9 +66,19 @@ public class RemovePeerProcedure extends ModifyPeerProcedure 
{
 env.getReplicationPeerManager().removePeer(peerId);
   }
 
+  private void removeRemoteWALs(MasterProcedureEnv env) throws IOException {
+ReplaySyncReplicationWALManager remoteWALManager =
+env.getMasterServices().getReplaySyncReplicationWALManager();
+remoteWALManager.removePeerRemoteWALs(peerId);
+remoteWALManager.removePeerReplayWALDir(peerId);
+  }
+
   @Override
   protected void postPeerModification(MasterProcedureEnv env)
   throws IOException, ReplicationException {
+if (peerConfig.isSyncReplication()) {
+  removeRemoteWALs(env);
+}
 env.getReplicationPeerManager().removeAllQueuesAndHFileRefs(peerId);
 if (peerConfig.isSerial()) {
   env.getReplicationPeerManager().removeAllLastPushedSeqIds(peerId);

http://git-wip-us.apache.org/repos/asf/hbase/blob/5f3cc3c1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
index 72f5c37..eac5aa4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
@@ -115,6 +115,14 @@ public class ReplaySyncReplicationWALManager {
 }
   }
 
+  public void removePeerRemoteWALs(String peerId) throws IOException {
+Path remoteWALDir = getPeerRemoteWALDir(peerId);
+if (fs.exists(remoteWALDir) && !fs.delete(remoteWALDir, true)) {
+  throw new IOException(
+  "Failed to remove remote WALs dir " + remoteWALDir + " for peer id=" 
+ peerId);
+}
+  }
+
   public void initPeerWorkers(String peerId) {
 BlockingQueue servers = new LinkedBlockingQueue<>();
 services.getServerManager().getOnlineServers().keySet()

http://git-wip-us.apache.org/repos/asf/hbase/blob/5f3cc3c1/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
index 0d5fce8..de679be 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.replication;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -71,6 +72,10 @@ public class SyncReplicationTestBase {
 
   protected static String PEER_ID = "1";
 
+  protected static Path remoteWALDir1;
+
+  

[21/36] hbase git commit: HBASE-19990 Create remote wal directory when transitioning to state S

2018-05-17 Thread zhangduo
HBASE-19990 Create remote wal directory when transitioning to state S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bb87b0f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bb87b0f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bb87b0f5

Branch: refs/heads/HBASE-19064
Commit: bb87b0f52f69ab892ee8843872c7ceefe7c636e5
Parents: 4edb494
Author: zhangduo 
Authored: Wed Feb 14 16:01:16 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../procedure2/ProcedureYieldException.java |  9 --
 .../hbase/replication/ReplicationUtils.java |  2 ++
 .../hadoop/hbase/master/MasterFileSystem.java   | 19 ++---
 .../master/procedure/MasterProcedureEnv.java|  5 
 ...ransitPeerSyncReplicationStateProcedure.java | 29 
 .../hbase/replication/TestSyncReplication.java  |  8 ++
 6 files changed, 55 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bb87b0f5/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
index 0487ac5b..dbb9981 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
@@ -15,16 +15,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.procedure2;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
-// TODO: Not used yet
+/**
+ * Indicate that a procedure wants to be rescheduled. Usually because there 
are something wrong but
+ * we do not want to fail the procedure.
+ * 
+ * TODO: need to support scheduling after a delay.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class ProcedureYieldException extends ProcedureException {
+
   /** default constructor */
   public ProcedureYieldException() {
 super();

http://git-wip-us.apache.org/repos/asf/hbase/blob/bb87b0f5/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index d94cb00..e402d0f 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -41,6 +41,8 @@ public final class ReplicationUtils {
 
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
+  public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";
+
   private ReplicationUtils() {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bb87b0f5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 864be02..7ccbd71 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -133,7 +134,6 @@ public class MasterFileSystem {
* Idempotent.
*/
   private void createInitialFileSystemLayout() throws IOException {
-
 final String[] protectedSubDirs = new String[] {
 HConstants.BASE_NAMESPACE_DIR,
 HConstants.HFILE_ARCHIVE_DIRECTORY,
@@ -145,7 +145,8 @@ public class MasterFileSystem {
   HConstants.HREGION_LOGDIR_NAME,
   HConstants.HREGION_OLDLOGDIR_NAME,
   HConstants.CORRUPT_DIR_NAME,
-  WALProcedureStore.MASTER_PROCEDURE_LOGDIR
+  

[11/36] hbase git commit: HBASE-19078 Add a remote peer cluster wal directory config for synchronous replication

2018-05-17 Thread zhangduo
HBASE-19078 Add a remote peer cluster wal directory config for synchronous 
replication

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/912dfc8b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/912dfc8b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/912dfc8b

Branch: refs/heads/HBASE-19064
Commit: 912dfc8b71cdeee4b91a4eaa38cea1097f6f5ad6
Parents: 21d1d5e
Author: Guanghao Zhang 
Authored: Sat Jan 13 18:55:28 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  6 ++
 .../replication/ReplicationPeerConfig.java  | 20 -
 .../ReplicationPeerConfigBuilder.java   |  7 ++
 .../src/main/protobuf/Replication.proto |  1 +
 .../replication/ReplicationPeerManager.java | 15 
 .../replication/TestReplicationAdmin.java   | 77 
 .../src/main/ruby/hbase/replication_admin.rb| 14 ++--
 hbase-shell/src/main/ruby/hbase_constants.rb|  1 +
 .../src/main/ruby/shell/commands/add_peer.rb| 21 +-
 .../src/main/ruby/shell/commands/list_peers.rb  | 19 -
 .../test/ruby/hbase/replication_admin_test.rb   | 16 
 11 files changed, 186 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/912dfc8b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index b1c1713..474ded3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -319,6 +319,9 @@ public final class ReplicationPeerConfigUtil {
 
excludeNamespacesList.stream().map(ByteString::toStringUtf8).collect(Collectors.toSet()));
 }
 
+if (peer.hasRemoteWALDir()) {
+  builder.setRemoteWALDir(peer.getRemoteWALDir());
+}
 return builder.build();
   }
 
@@ -376,6 +379,9 @@ public final class ReplicationPeerConfigUtil {
   }
 }
 
+if (peerConfig.getRemoteWALDir() != null) {
+  builder.setRemoteWALDir(peerConfig.getRemoteWALDir());
+}
 return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/912dfc8b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index e0d9a4c..97abc74 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -47,6 +47,8 @@ public class ReplicationPeerConfig {
   private Set excludeNamespaces = null;
   private long bandwidth = 0;
   private final boolean serial;
+  // Used by synchronous replication
+  private String remoteWALDir;
 
   private ReplicationPeerConfig(ReplicationPeerConfigBuilderImpl builder) {
 this.clusterKey = builder.clusterKey;
@@ -66,6 +68,7 @@ public class ReplicationPeerConfig {
 : null;
 this.bandwidth = builder.bandwidth;
 this.serial = builder.serial;
+this.remoteWALDir = builder.remoteWALDir;
   }
 
   private Map
@@ -213,6 +216,10 @@ public class ReplicationPeerConfig {
 return this;
   }
 
+  public String getRemoteWALDir() {
+return this.remoteWALDir;
+  }
+
   public static ReplicationPeerConfigBuilder newBuilder() {
 return new ReplicationPeerConfigBuilderImpl();
   }
@@ -230,7 +237,8 @@ public class ReplicationPeerConfig {
   .setReplicateAllUserTables(peerConfig.replicateAllUserTables())
   .setExcludeTableCFsMap(peerConfig.getExcludeTableCFsMap())
   .setExcludeNamespaces(peerConfig.getExcludeNamespaces())
-  
.setBandwidth(peerConfig.getBandwidth()).setSerial(peerConfig.isSerial());
+  .setBandwidth(peerConfig.getBandwidth()).setSerial(peerConfig.isSerial())
+  .setRemoteWALDir(peerConfig.getRemoteWALDir());
 return builder;
   }
 
@@ -259,6 +267,8 @@ public class ReplicationPeerConfig {
 
 private boolean serial = false;
 
+private String remoteWALDir = null;
+
 @Override
 public ReplicationPeerConfigBuilder setClusterKey(String 

[13/36] hbase git commit: HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

2018-05-17 Thread zhangduo
HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a28fbbf5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a28fbbf5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a28fbbf5

Branch: refs/heads/HBASE-19064
Commit: a28fbbf51b5157bc0e82540242b4fa9c38e01434
Parents: f453292
Author: Guanghao Zhang 
Authored: Fri Jan 26 16:50:48 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  | 22 +++---
 .../hbase/replication/SyncReplicationState.java | 17 ++
 .../hbase/shaded/protobuf/RequestConverter.java |  7 +++---
 .../src/main/protobuf/Replication.proto | 13 +++
 .../replication/ZKReplicationPeerStorage.java   | 24 +---
 .../hadoop/hbase/master/MasterRpcServices.java  |  9 
 ...ransitPeerSyncReplicationStateProcedure.java |  9 
 .../TestReplicationSourceManager.java   |  2 +-
 8 files changed, 67 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a28fbbf5/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 6cbe05b..331795c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -403,7 +403,7 @@ public final class ReplicationPeerConfigUtil {
 ReplicationProtos.ReplicationState.State.ENABLED == 
desc.getState().getState();
 ReplicationPeerConfig config = convert(desc.getConfig());
 return new ReplicationPeerDescription(desc.getId(), enabled, config,
-
SyncReplicationState.valueOf(desc.getSyncReplicationState().getNumber()));
+  toSyncReplicationState(desc.getSyncReplicationState()));
   }
 
   public static ReplicationProtos.ReplicationPeerDescription
@@ -411,17 +411,33 @@ public final class ReplicationPeerConfigUtil {
 ReplicationProtos.ReplicationPeerDescription.Builder builder =
 ReplicationProtos.ReplicationPeerDescription.newBuilder();
 builder.setId(desc.getPeerId());
+
 ReplicationProtos.ReplicationState.Builder stateBuilder =
 ReplicationProtos.ReplicationState.newBuilder();
 stateBuilder.setState(desc.isEnabled() ? 
ReplicationProtos.ReplicationState.State.ENABLED :
 ReplicationProtos.ReplicationState.State.DISABLED);
 builder.setState(stateBuilder.build());
+
 builder.setConfig(convert(desc.getPeerConfig()));
-builder.setSyncReplicationState(
-  
ReplicationProtos.SyncReplicationState.forNumber(desc.getSyncReplicationState().ordinal()));
+
builder.setSyncReplicationState(toSyncReplicationState(desc.getSyncReplicationState()));
+
 return builder.build();
   }
 
+  public static ReplicationProtos.SyncReplicationState
+  toSyncReplicationState(SyncReplicationState state) {
+ReplicationProtos.SyncReplicationState.Builder syncReplicationStateBuilder 
=
+ReplicationProtos.SyncReplicationState.newBuilder();
+syncReplicationStateBuilder
+
.setState(ReplicationProtos.SyncReplicationState.State.forNumber(state.ordinal()));
+return syncReplicationStateBuilder.build();
+  }
+
+  public static SyncReplicationState
+  toSyncReplicationState(ReplicationProtos.SyncReplicationState state) {
+return SyncReplicationState.valueOf(state.getState().getNumber());
+  }
+
   public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig(
   Map tableCfs, ReplicationPeerConfig peerConfig) 
{
 ReplicationPeerConfigBuilder builder = 
ReplicationPeerConfig.newBuilder(peerConfig);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a28fbbf5/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
index bd144e9..a65b144 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
+++ 

[24/36] hbase git commit: HBASE-19999 Remove the SYNC_REPLICATION_ENABLED flag

2018-05-17 Thread zhangduo
HBASE-19999 Remove the SYNC_REPLICATION_ENABLED flag


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a73e19a9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a73e19a9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a73e19a9

Branch: refs/heads/HBASE-19064
Commit: a73e19a916f78021d7f6362c88cfd9114d1aec8c
Parents: 564bdf9
Author: Guanghao Zhang 
Authored: Fri Mar 9 11:30:25 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:36 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java  |  2 --
 .../hadoop/hbase/regionserver/HRegionServer.java | 13 -
 .../hbase/wal/SyncReplicationWALProvider.java| 19 ++-
 .../org/apache/hadoop/hbase/wal/WALFactory.java  | 18 --
 .../hbase/replication/TestSyncReplication.java   |  1 -
 .../master/TestRecoverStandbyProcedure.java  |  2 --
 .../wal/TestSyncReplicationWALProvider.java  |  2 --
 7 files changed, 38 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a73e19a9/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index e402d0f..cb22f57 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -37,8 +37,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
-  public static final String SYNC_REPLICATION_ENABLED = 
"hbase.replication.sync.enabled";
-
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
   public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";

http://git-wip-us.apache.org/repos/asf/hbase/blob/a73e19a9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2fb4f67..af2f3b5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1804,10 +1804,8 @@ public class HRegionServer extends HasThread implements
   private void setupWALAndReplication() throws IOException {
 boolean isMasterNoTableOrSystemTableOnly = this instanceof HMaster &&
   (!LoadBalancer.isTablesOnMaster(conf) || 
LoadBalancer.isSystemTablesOnlyOnMaster(conf));
-if (isMasterNoTableOrSystemTableOnly) {
-  conf.setBoolean(ReplicationUtils.SYNC_REPLICATION_ENABLED, false);
-}
-WALFactory factory = new WALFactory(conf, serverName.toString());
+WALFactory factory =
+new WALFactory(conf, serverName.toString(), 
!isMasterNoTableOrSystemTableOnly);
 if (!isMasterNoTableOrSystemTableOnly) {
   // TODO Replication make assumptions here based on the default 
filesystem impl
   Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
@@ -1926,11 +1924,8 @@ public class HRegionServer extends HasThread implements
 }
 this.executorService.startExecutorService(ExecutorType.RS_REFRESH_PEER,
   conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2));
-
-if (conf.getBoolean(ReplicationUtils.SYNC_REPLICATION_ENABLED, false)) {
-  
this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
-
conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 
2));
-}
+
this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
+  
conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 
1));
 
 Threads.setDaemonThreadRunning(this.walRoller.getThread(), getName() + 
".logRoller",
 uncaughtExceptionHandler);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a73e19a9/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index 282aa21..54287fe 100644
--- 

[34/36] hbase git commit: HBASE-19782 Reject the replication request when peer is DA or A state

2018-05-17 Thread zhangduo
HBASE-19782 Reject the replication request when peer is DA or A state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c77f8a8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c77f8a8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c77f8a8

Branch: refs/heads/HBASE-19064
Commit: 6c77f8a833dabbf1c0043c4298de6c03473e35c0
Parents: 87d05d6
Author: huzheng 
Authored: Fri Mar 2 18:05:29 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:40 2018 +0800

--
 .../hbase/protobuf/ReplicationProtbufUtil.java  |  2 +-
 .../hadoop/hbase/regionserver/HRegion.java  |  2 +-
 .../hbase/regionserver/HRegionServer.java   |  5 +--
 .../hbase/regionserver/RSRpcServices.java   | 25 +--
 .../RejectReplicationRequestStateChecker.java   | 45 
 .../ReplaySyncReplicationWALCallable.java   | 24 ++-
 .../replication/regionserver/Replication.java   |  2 +-
 .../regionserver/ReplicationSink.java   | 16 +++
 .../SyncReplicationPeerInfoProvider.java| 11 ++---
 .../SyncReplicationPeerInfoProviderImpl.java| 13 +++---
 .../SyncReplicationPeerMappingManager.java  |  5 +--
 .../hbase/wal/SyncReplicationWALProvider.java   |  7 +--
 .../replication/SyncReplicationTestBase.java| 32 ++
 .../replication/TestSyncReplicationActive.java  | 13 +-
 .../regionserver/TestReplicationSink.java   |  5 +--
 .../regionserver/TestWALEntrySinkFilter.java|  3 +-
 .../wal/TestSyncReplicationWALProvider.java |  6 +--
 17 files changed, 163 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c77f8a8/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 81dd59e..e01f881 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.SizedCellScanner;
@@ -45,7 +46,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminServic
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.wal.WAL.Entry;
 
 @InterfaceAudience.Private
 public class ReplicationProtbufUtil {

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c77f8a8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6aa4b27..ba487c6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1984,7 +1984,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   private boolean shouldForbidMajorCompaction() {
 if (rsServices != null && rsServices.getReplicationSourceService() != 
null) {
   return 
rsServices.getReplicationSourceService().getSyncReplicationPeerInfoProvider()
-  .checkState(getRegionInfo(), ForbidMajorCompactionChecker.get());
+  .checkState(getRegionInfo().getTable(), 
ForbidMajorCompactionChecker.get());
 }
 return false;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c77f8a8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 440a838..ab571c6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2478,10 +2478,9 @@ public 

[16/36] hbase git commit: HBASE-19747 Introduce a special WALProvider for synchronous replication

2018-05-17 Thread zhangduo
HBASE-19747 Introduce a special WALProvider for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/61d94362
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/61d94362
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/61d94362

Branch: refs/heads/HBASE-19064
Commit: 61d94362958545ddc1d57f151187197deb861213
Parents: 912dfc8
Author: zhangduo 
Authored: Fri Jan 19 18:38:39 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   7 +
 .../hbase/regionserver/wal/AsyncFSWAL.java  |   1 -
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |   4 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   4 -
 .../regionserver/PeerActionListener.java|  33 +++
 .../SynchronousReplicationPeerProvider.java |  35 +++
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   1 +
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  18 +-
 .../hbase/wal/NettyAsyncFSWALConfigHelper.java  |   8 +-
 .../hbase/wal/RegionGroupingProvider.java   |  13 +-
 .../wal/SynchronousReplicationWALProvider.java  | 225 +++
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  37 ++-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java |  16 +-
 .../regionserver/TestCompactionPolicy.java  |   1 +
 .../regionserver/TestFailedAppendAndSync.java   | 122 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  24 +-
 .../TestHRegionWithInMemoryFlush.java   |   7 -
 .../hbase/regionserver/TestRegionIncrement.java |  20 +-
 .../hbase/regionserver/TestWALLockup.java   |   1 +
 .../regionserver/wal/AbstractTestWALReplay.java |   1 +
 .../regionserver/wal/ProtobufLogTestHelper.java |  44 +++-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java  |  13 +-
 .../regionserver/wal/TestAsyncWALReplay.java|   4 +-
 .../wal/TestCombinedAsyncWriter.java|   3 +-
 .../hbase/regionserver/wal/TestFSHLog.java  |  15 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |   1 +
 .../apache/hadoop/hbase/wal/IOTestProvider.java |   2 -
 .../TestSynchronousReplicationWALProvider.java  | 153 +
 28 files changed, 659 insertions(+), 154 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/61d94362/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 825ad17..4255086 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -434,6 +434,13 @@ public abstract class AbstractFSWAL 
implements WAL {
 this.implClassName = getClass().getSimpleName();
   }
 
+  /**
+   * Used to initialize the WAL. Usually just call rollWriter to create the 
first log writer.
+   */
+  public void init() throws IOException {
+rollWriter();
+  }
+
   @Override
   public void registerWALActionsListener(WALActionsListener listener) {
 this.listeners.add(listener);

http://git-wip-us.apache.org/repos/asf/hbase/blob/61d94362/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 4732f41..d98ab75 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -248,7 +248,6 @@ public class AsyncFSWAL extends AbstractFSWAL {
 batchSize = conf.getLong(WAL_BATCH_SIZE, DEFAULT_WAL_BATCH_SIZE);
 waitOnShutdownInSeconds = 
conf.getInt(ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS,
   DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS);
-rollWriter();
   }
 
   private static boolean waitingRoll(int epochAndState) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/61d94362/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 42b0dae..0495337 100644
--- 

[26/36] hbase git commit: HBASE-19079 Support setting up two clusters with A and S state

2018-05-17 Thread zhangduo
HBASE-19079 Support setting up two clusters with A and S state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/051197f7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/051197f7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/051197f7

Branch: refs/heads/HBASE-19064
Commit: 051197f7543152149eee87a3497412b9f1f1185c
Parents: a73e19a
Author: zhangduo 
Authored: Tue Apr 10 22:35:19 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:39 2018 +0800

--
 .../replication/ReplicationPeerManager.java |   5 +-
 ...ransitPeerSyncReplicationStateProcedure.java |   2 +-
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  14 ++
 .../hadoop/hbase/regionserver/wal/WALUtil.java  |  25 ++-
 .../hbase/replication/ChainWALEntryFilter.java  |  28 +--
 .../ReplaySyncReplicationWALCallable.java   |  27 ++-
 .../SyncReplicationPeerInfoProviderImpl.java|   6 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |  10 +-
 .../hbase/wal/SyncReplicationWALProvider.java   |  94 ++---
 .../org/apache/hadoop/hbase/wal/WALEdit.java|   8 +-
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   2 +-
 .../replication/TestReplicationAdmin.java   |  33 +--
 .../regionserver/wal/TestWALDurability.java |   2 +
 .../replication/SyncReplicationTestBase.java| 185 +
 .../hbase/replication/TestSyncReplication.java  | 207 ---
 .../replication/TestSyncReplicationActive.java  |  64 ++
 .../replication/TestSyncReplicationStandBy.java |  96 +
 17 files changed, 521 insertions(+), 287 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/051197f7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 41dd6e3..229549e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -68,8 +68,9 @@ public class ReplicationPeerManager {
 
   private final ImmutableMap
 allowedTransition = 
Maps.immutableEnumMap(ImmutableMap.of(SyncReplicationState.ACTIVE,
-  EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), 
SyncReplicationState.STANDBY,
-  EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), 
SyncReplicationState.DOWNGRADE_ACTIVE,
+  EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE, 
SyncReplicationState.STANDBY),
+  SyncReplicationState.STANDBY, 
EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE),
+  SyncReplicationState.DOWNGRADE_ACTIVE,
   EnumSet.of(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE)));
 
   ReplicationPeerManager(ReplicationPeerStorage peerStorage, 
ReplicationQueueStorage queueStorage,

http://git-wip-us.apache.org/repos/asf/hbase/blob/051197f7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index cc51890..5da2b0c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -171,7 +171,7 @@ public class TransitPeerSyncReplicationStateProcedure
 }
 return Flow.HAS_MORE_STATE;
   case REPLAY_REMOTE_WAL_IN_PEER:
-// TODO: replay remote wal when transiting from S to DA.
+addChildProcedure(new RecoverStandbyProcedure(peerId));
 
setNextState(PeerSyncReplicationStateTransitionState.REOPEN_ALL_REGIONS_IN_PEER);
 return Flow.HAS_MORE_STATE;
   case REOPEN_ALL_REGIONS_IN_PEER:

http://git-wip-us.apache.org/repos/asf/hbase/blob/051197f7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 0495337..a98567a 100644
--- 

[19/36] hbase git commit: HBASE-19957 General framework to transit sync replication state

2018-05-17 Thread zhangduo
HBASE-19957 General framework to transit sync replication state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/44a4cf34
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/44a4cf34
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/44a4cf34

Branch: refs/heads/HBASE-19064
Commit: 44a4cf3417ff7914661e907e321fc881a2a20384
Parents: 5d20df7
Author: zhangduo 
Authored: Fri Feb 9 18:33:28 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |   2 -
 .../replication/ReplicationPeerDescription.java |   5 +-
 .../hbase/replication/SyncReplicationState.java |  19 +-
 .../org/apache/hadoop/hbase/HConstants.java |   3 +
 .../src/main/protobuf/MasterProcedure.proto |  20 +-
 .../hbase/replication/ReplicationPeerImpl.java  |  45 -
 .../replication/ReplicationPeerStorage.java |  25 ++-
 .../hbase/replication/ReplicationPeers.java |  27 ++-
 .../replication/ZKReplicationPeerStorage.java   |  63 +--
 .../hbase/coprocessor/MasterObserver.java   |   7 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../hbase/master/MasterCoprocessorHost.java |  12 +-
 .../replication/AbstractPeerProcedure.java  |  14 +-
 .../master/replication/ModifyPeerProcedure.java |  11 --
 .../replication/RefreshPeerProcedure.java   |  18 +-
 .../replication/ReplicationPeerManager.java |  89 +
 ...ransitPeerSyncReplicationStateProcedure.java | 181 ---
 .../hbase/regionserver/HRegionServer.java   |  35 ++--
 .../regionserver/ReplicationSourceService.java  |  11 +-
 .../regionserver/PeerActionListener.java|   4 +-
 .../regionserver/PeerProcedureHandler.java  |  16 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  52 +-
 .../regionserver/RefreshPeerCallable.java   |   7 +
 .../replication/regionserver/Replication.java   |  22 ++-
 .../regionserver/ReplicationSourceManager.java  |  41 +++--
 .../SyncReplicationPeerInfoProvider.java|  43 +
 .../SyncReplicationPeerInfoProviderImpl.java|  71 
 .../SyncReplicationPeerMappingManager.java  |  48 +
 .../SyncReplicationPeerProvider.java|  35 
 .../hbase/wal/SyncReplicationWALProvider.java   |  35 ++--
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  47 ++---
 .../replication/TestReplicationAdmin.java   |   3 +-
 .../TestReplicationSourceManager.java   |   5 +-
 .../wal/TestSyncReplicationWALProvider.java |  36 ++--
 34 files changed, 743 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/44a4cf34/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 997a155..cc7b4bc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication;
 
 import java.util.Collection;
@@ -25,7 +24,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/44a4cf34/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
index 2d077c5..b0c27bb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.hbase.replication;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription
+ * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription.
+ * 
+ * To developer, here we do not store the new sync replication state since it 
is just an
+ * intermediate state and this class is 

[02/36] hbase git commit: HBASE-20564 Tighter ByteBufferKeyValue Cell Comparator; ADDENDUM

2018-05-17 Thread zhangduo
HBASE-20564 Tighter ByteBufferKeyValue Cell Comparator; ADDENDUM

Add method the CellComparator Interface. Add implementation to
meta comparator so we don't fall back to the default comparator.

Includes a nothing change to hbase-server/pom.xml just to provoke
build.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/438af9bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/438af9bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/438af9bf

Branch: refs/heads/HBASE-19064
Commit: 438af9bf74c915fce9c8f525d986eb581e8cc0da
Parents: 060b8ac
Author: Michael Stack 
Authored: Wed May 16 06:25:42 2018 -0700
Committer: Michael Stack 
Committed: Wed May 16 09:43:16 2018 -0700

--
 .../org/apache/hadoop/hbase/CellComparator.java |  8 +++
 .../apache/hadoop/hbase/CellComparatorImpl.java | 74 +---
 hbase-server/pom.xml|  3 +-
 3 files changed, 42 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/438af9bf/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
index dc755f5..60be670 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
@@ -51,6 +51,14 @@ public interface CellComparator extends Comparator<Cell> {
   int compare(Cell leftCell, Cell rightCell);
 
   /**
+   * Compare cells.
+   * @param ignoreSequenceid True if we are to compare the key portion only 
and ignore
+   * the sequenceid. Set to false to compare key and consider sequenceid.
+   * @return 0 if equal, -1 if a < b, and +1 if a > b.
+   */
+  int compare(Cell leftCell, Cell rightCell, boolean ignoreSequenceid);
+
+  /**
* Lexographically compares the rows of two cells.
* @param leftCell the left hand side cell
* @param rightCell the right hand side cell

http://git-wip-us.apache.org/repos/asf/hbase/blob/438af9bf/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index fa336fd..785d8ff 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -63,13 +63,12 @@ public class CellComparatorImpl implements CellComparator {
 
   /**
* Compare cells.
-   * @param a
-   * @param b
* @param ignoreSequenceid True if we are to compare the key portion only 
and ignore
-   * the sequenceid. Set to false to compare key and consider sequenceid.
+   *  the sequenceid. Set to false to compare key and consider sequenceid.
   * @return 0 if equal, -1 if a < b, and +1 if a > b.
*/
-  public final int compare(final Cell a, final Cell b, boolean 
ignoreSequenceid) {
+  @Override
+  public int compare(final Cell a, final Cell b, boolean ignoreSequenceid) {
 int diff = 0;
 if (a instanceof ByteBufferKeyValue && b instanceof ByteBufferKeyValue) {
   diff = compareByteBufferKeyValue((ByteBufferKeyValue)a, 
(ByteBufferKeyValue)b);
@@ -97,7 +96,8 @@ public class CellComparatorImpl implements CellComparator {
* Caches deserialized lengths of rows and families, etc., and reuses them 
where it can
* (ByteBufferKeyValue has been changed to be amenable to our providing 
pre-made lengths, etc.)
*/
-  private final int compareByteBufferKeyValue(ByteBufferKeyValue left, 
ByteBufferKeyValue right) {
+  private static final int compareByteBufferKeyValue(ByteBufferKeyValue left,
+  ByteBufferKeyValue right) {
 // Compare Rows. Cache row length.
 int leftRowLength = left.getRowLength();
 int rightRowLength = right.getRowLength();
@@ -134,6 +134,7 @@ public class CellComparatorImpl implements CellComparator {
 if (rightFamilyLength + rightQualifierLength == 0 && rightType == 
Type.Minimum.getCode()) {
   return -1;
 }
+
 // Compare families.
 int leftFamilyPosition = left.getFamilyPosition(leftFamilyLengthPosition);
 int rightFamilyPosition = 
right.getFamilyPosition(rightFamilyLengthPosition);
@@ -153,7 +154,8 @@ public class CellComparatorImpl implements CellComparator {
   return diff;
 }
 // Timestamps.
-diff = compareTimestamps(left.getTimestamp(leftKeyLength), 
right.getTimestamp(rightKeyLength));
+diff = 

[35/36] hbase git commit: HBASE-19865 Add UT for sync replication peer in DA state

2018-05-17 Thread zhangduo
HBASE-19865 Add UT for sync replication peer in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1497b362
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1497b362
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1497b362

Branch: refs/heads/HBASE-19064
Commit: 1497b3622d4c79c38fbc07ea3473eb6991d03413
Parents: f112664
Author: zhangduo 
Authored: Tue May 8 20:33:22 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:40 2018 +0800

--
 .../hbase/replication/TestReplicationBase.java  | 28 +++---
 ...estReplicationChangingPeerRegionservers.java | 20 ++
 .../TestReplicationSmallTestsSync.java  | 40 
 3 files changed, 76 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1497b362/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
index f96dbe5..cd84293 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,6 +27,8 @@ import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -58,6 +59,9 @@ import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
+
 /**
  * This class is only a base for other integration-level replication tests.
  * Do not add tests here.
@@ -99,6 +103,10 @@ public class TestReplicationBase {
 return false;
   }
 
+  protected boolean isSyncPeer() {
+return false;
+  }
+
   protected final void cleanUp() throws IOException, InterruptedException {
 // Starting and stopping replication can make us miss new logs,
 // rolling like this makes sure the most recent one gets added to the queue
@@ -245,9 +253,19 @@ public class TestReplicationBase {
   @Before
   public void setUpBase() throws Exception {
 if (!peerExist(PEER_ID2)) {
-  ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
-  
.setClusterKey(utility2.getClusterKey()).setSerial(isSerialPeer()).build();
-  hbaseAdmin.addReplicationPeer(PEER_ID2, rpc);
+  ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder()
+.setClusterKey(utility2.getClusterKey()).setSerial(isSerialPeer());
+  if (isSyncPeer()) {
+FileSystem fs2 = utility2.getTestFileSystem();
+// The remote wal dir is not important as we do not use it in DA 
state, here we only need to
+// confirm that a sync peer in DA state can still replicate data to 
remote cluster
+// asynchronously.
+builder.setReplicateAllUserTables(false)
+  .setTableCFsMap(ImmutableMap.of(tableName, ImmutableList.of()))
+  .setRemoteWALDir(new Path("/RemoteWAL")
+.makeQualified(fs2.getUri(), 
fs2.getWorkingDirectory()).toUri().toString());
+  }
+  hbaseAdmin.addReplicationPeer(PEER_ID2, builder.build());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1497b362/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
index b94b443..5c96742 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
@@ -62,22 +62,28 @@ public class TestReplicationChangingPeerRegionservers 
extends TestReplicationBas
   private static final Logger LOG =

[18/36] hbase git commit: HBASE-19957 General framework to transit sync replication state

2018-05-17 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/44a4cf34/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
new file mode 100644
index 000..92f2c52
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Get the information for a sync replication peer.
+ */
+@InterfaceAudience.Private
+public interface SyncReplicationPeerInfoProvider {
+
+  /**
+   * Return the peer id and remote WAL directory if the region is 
synchronously replicated and the
+   * state is {@link SyncReplicationState#ACTIVE}.
+   */
+  Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info);
+
+  /**
+   * Check whether the give region is contained in a sync replication peer 
which is in the given
+   * state.
+   */
+  boolean isInState(RegionInfo info, SyncReplicationState state);
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/44a4cf34/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
new file mode 100644
index 000..32159e6
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.ReplicationPeer;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+class SyncReplicationPeerInfoProviderImpl implements 
SyncReplicationPeerInfoProvider {
+
+  private final ReplicationPeers replicationPeers;
+
+  private final SyncReplicationPeerMappingManager mapping;
+
+  SyncReplicationPeerInfoProviderImpl(ReplicationPeers replicationPeers,
+  SyncReplicationPeerMappingManager mapping) {
+this.replicationPeers = replicationPeers;
+this.mapping = mapping;
+  }
+
+  @Override
+  public Optional> getPeerIdAndRemoteWALDir(RegionInfo 
info) {
+String peerId = mapping.getPeerId(info);
+if (peerId == null) {
+  return Optional.empty();
+   

[03/36] hbase git commit: HBASE-20547 Restore from backup will fail if done from a different file system

2018-05-17 Thread zhangduo
HBASE-20547 Restore from backup will fail if done from a different file system

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6d656b7e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6d656b7e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6d656b7e

Branch: refs/heads/HBASE-19064
Commit: 6d656b7e71e38c32a65026df6196108e6a60ca0a
Parents: 438af9b
Author: Vladimir Rodionov 
Authored: Tue May 15 12:10:40 2018 -0700
Committer: tedyu 
Committed: Wed May 16 09:55:19 2018 -0700

--
 .../hadoop/hbase/backup/impl/IncrementalTableBackupClient.java| 1 +
 .../java/org/apache/hadoop/hbase/backup/util/BackupUtils.java | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6d656b7e/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
index 8fd6573..43824d7 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
@@ -361,6 +361,7 @@ public class IncrementalTableBackupClient extends 
TableBackupClient {
   protected void deleteBulkLoadDirectory() throws IOException {
 // delete original bulk load directory on method exit
 Path path = getBulkOutputDir();
+FileSystem fs = FileSystem.get(path.toUri(), conf);
 boolean result = fs.delete(path, true);
 if (!result) {
   LOG.warn("Could not delete " + path);

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d656b7e/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
index 96ecab9..e01849a 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/BackupUtils.java
@@ -563,7 +563,8 @@ public final class BackupUtils {
   private static List getHistory(Configuration conf, Path 
backupRootPath)
   throws IOException {
 // Get all (n) history from backup root destination
-FileSystem fs = FileSystem.get(conf);
+
+FileSystem fs = FileSystem.get(backupRootPath.toUri(), conf);
 RemoteIterator it = 
fs.listLocatedStatus(backupRootPath);
 
 List infos = new ArrayList<>();



[08/36] hbase git commit: HBASE-20530 Composition of backup directory containing namespace when restoring is different from the actual hfile location

2018-05-17 Thread zhangduo
HBASE-20530 Composition of backup directory containing namespace when restoring 
is different from the actual hfile location

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/acbc3a22
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/acbc3a22
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/acbc3a22

Branch: refs/heads/HBASE-19064
Commit: acbc3a225338fd1ff82226ebbd937f7b15ef5b60
Parents: f4006b5
Author: Vladimir Rodionov 
Authored: Thu May 10 13:50:31 2018 -0700
Committer: tedyu 
Committed: Wed May 16 14:21:20 2018 -0700

--
 .../org/apache/hadoop/hbase/backup/TestBackupBase.java   |  6 +++---
 .../hadoop/hbase/backup/TestIncrementalBackup.java   |  4 ++--
 .../hadoop/hbase/mapreduce/HFileOutputFormat2.java   |  9 -
 .../hadoop/hbase/mapreduce/TestHFileOutputFormat2.java   | 11 ---
 4 files changed, 21 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/acbc3a22/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
--
diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 4243f5b..08ecd63 100644
--- 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -88,8 +88,8 @@ public class TestBackupBase {
   protected static TableName table3 = TableName.valueOf("table3");
   protected static TableName table4 = TableName.valueOf("table4");
 
-  protected static TableName table1_restore = 
TableName.valueOf("ns1:table1_restore");
-  protected static TableName table2_restore = 
TableName.valueOf("ns2:table2_restore");
+  protected static TableName table1_restore = 
TableName.valueOf("default:table1");
+  protected static TableName table2_restore = TableName.valueOf("ns2:table2");
   protected static TableName table3_restore = 
TableName.valueOf("ns3:table3_restore");
   protected static TableName table4_restore = 
TableName.valueOf("ns4:table4_restore");
 
@@ -404,7 +404,7 @@ public class TestBackupBase {
 
   protected static void createTables() throws Exception {
 long tid = System.currentTimeMillis();
-table1 = TableName.valueOf("ns1:test-" + tid);
+table1 = TableName.valueOf("test-" + tid);
 HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
 
 // Create namespaces

http://git-wip-us.apache.org/repos/asf/hbase/blob/acbc3a22/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
--
diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 0bce769..b74f42f 100644
--- 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -163,14 +163,14 @@ public class TestIncrementalBackup extends TestBackupBase 
{
 String backupIdIncMultiple2 = client.backupTables(request);
 assertTrue(checkSucceeded(backupIdIncMultiple2));
 
-// #4 - restore full backup for all tables, without overwrite
+// #4 - restore full backup for all tables
 TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
 
 TableName[] tablesMapFull = new TableName[] { table1_restore, 
table2_restore };
 
 LOG.debug("Restoring full " + backupIdFull);
 client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, 
backupIdFull, false,
-  tablesRestoreFull, tablesMapFull, false));
+  tablesRestoreFull, tablesMapFull, true));
 
 // #5.1 - check tables for full restore
 HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();

http://git-wip-us.apache.org/repos/asf/hbase/blob/acbc3a22/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 3b04c0b..a403455 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -27,6 +27,7 @@ import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import 

[12/36] hbase git commit: HBASE-19083 Introduce a new log writer which can write to two HDFSes

2018-05-17 Thread zhangduo
HBASE-19083 Introduce a new log writer which can write to two HDFSes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/21d1d5e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/21d1d5e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/21d1d5e7

Branch: refs/heads/HBASE-19064
Commit: 21d1d5e7719d0c20dc370f06cdb07a537cbb552e
Parents: 60bdaf7
Author: zhangduo 
Authored: Thu Jan 11 21:08:02 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../hbase/regionserver/wal/AsyncFSWAL.java  |  21 +--
 .../regionserver/wal/CombinedAsyncWriter.java   | 134 ++
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  67 +
 .../wal/AbstractTestProtobufLog.java| 110 +++
 .../regionserver/wal/ProtobufLogTestHelper.java |  99 ++
 .../regionserver/wal/TestAsyncProtobufLog.java  |  32 +
 .../wal/TestCombinedAsyncWriter.java| 136 +++
 .../hbase/regionserver/wal/TestProtobufLog.java |  14 +-
 .../regionserver/wal/WriterOverAsyncWriter.java |  63 +
 9 files changed, 533 insertions(+), 143 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/21d1d5e7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index d032d83..4732f41 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -607,10 +607,14 @@ public class AsyncFSWAL extends 
AbstractFSWAL {
 }
   }
 
+  protected final AsyncWriter createAsyncWriter(FileSystem fs, Path path) 
throws IOException {
+return AsyncFSWALProvider.createAsyncWriter(conf, fs, path, false, 
this.blocksize,
+  eventLoopGroup, channelClass);
+  }
+
   @Override
   protected AsyncWriter createWriterInstance(Path path) throws IOException {
-return AsyncFSWALProvider.createAsyncWriter(conf, fs, path, false,
-this.blocksize, eventLoopGroup, channelClass);
+return createAsyncWriter(fs, path);
   }
 
   private void waitForSafePoint() {
@@ -632,13 +636,12 @@ public class AsyncFSWAL extends 
AbstractFSWAL {
 }
   }
 
-  private long closeWriter() {
-AsyncWriter oldWriter = this.writer;
-if (oldWriter != null) {
-  long fileLength = oldWriter.getLength();
+  protected final long closeWriter(AsyncWriter writer) {
+if (writer != null) {
+  long fileLength = writer.getLength();
   closeExecutor.execute(() -> {
 try {
-  oldWriter.close();
+  writer.close();
 } catch (IOException e) {
   LOG.warn("close old writer failed", e);
 }
@@ -654,7 +657,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
   throws IOException {
 Preconditions.checkNotNull(nextWriter);
 waitForSafePoint();
-long oldFileLen = closeWriter();
+long oldFileLen = closeWriter(this.writer);
 logRollAndSetupWalProps(oldPath, newPath, oldFileLen);
 this.writer = nextWriter;
 if (nextWriter instanceof AsyncProtobufLogWriter) {
@@ -679,7 +682,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
   @Override
   protected void doShutdown() throws IOException {
 waitForSafePoint();
-closeWriter();
+closeWriter(this.writer);
 closeExecutor.shutdown();
 try {
   if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, 
TimeUnit.SECONDS)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/21d1d5e7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
new file mode 100644
index 000..8ecfede
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * 

[30/36] hbase git commit: HBASE-20434 Also remove remote wals when peer is in DA state

2018-05-17 Thread zhangduo
HBASE-20434 Also remove remote wals when peer is in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/386e40d4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/386e40d4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/386e40d4

Branch: refs/heads/HBASE-19064
Commit: 386e40d453b7d2491198f1c789341425b28876da
Parents: dea0494
Author: zhangduo 
Authored: Wed Apr 25 17:12:23 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:40 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java |   4 +
 ...ransitPeerSyncReplicationStateProcedure.java |   2 +-
 .../regionserver/ReplicationSource.java |   7 +-
 .../regionserver/ReplicationSourceManager.java  |  86 ++--
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |  19 ++--
 .../hbase/wal/SyncReplicationWALProvider.java   |  30 +-
 .../TestSyncReplicationRemoveRemoteWAL.java | 101 +++
 .../TestReplicationSourceManager.java   |  68 -
 8 files changed, 251 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/386e40d4/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index 66e9b01..069db7a 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -191,6 +191,10 @@ public final class ReplicationUtils {
 return new Path(remoteWALDir, peerId);
   }
 
+  public static Path getRemoteWALDirForPeer(Path remoteWALDir, String peerId) {
+return new Path(remoteWALDir, peerId);
+  }
+
   /**
* Do the sleeping logic
* @param msg Why we sleep

http://git-wip-us.apache.org/repos/asf/hbase/blob/386e40d4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 5da2b0c..99fd615 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -211,7 +211,7 @@ public class TransitPeerSyncReplicationStateProcedure
   case CREATE_DIR_FOR_REMOTE_WAL:
 MasterFileSystem mfs = env.getMasterFileSystem();
 Path remoteWALDir = new Path(mfs.getWALRootDir(), 
ReplicationUtils.REMOTE_WAL_DIR_NAME);
-Path remoteWALDirForPeer = new Path(remoteWALDir, peerId);
+Path remoteWALDirForPeer = 
ReplicationUtils.getRemoteWALDirForPeer(remoteWALDir, peerId);
 FileSystem walFs = mfs.getWALFileSystem();
 try {
   if (walFs.exists(remoteWALDirForPeer)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/386e40d4/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 1a27fc1..7313f13 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -549,14 +549,17 @@ public class ReplicationSource implements 
ReplicationSourceInterface {
 }
 
 /**
+ * 
  * Split a path to get the start time
+ * 
+ * 
  * For example: 10.20.20.171%3A60020.1277499063250
+ * 
  * @param p path to split
  * @return start time
  */
 private static long getTS(Path p) {
-  int tsIndex = p.getName().lastIndexOf('.') + 1;
-  return Long.parseLong(p.getName().substring(tsIndex));
+  return AbstractFSWALProvider.getWALStartTimeFromWALName(p.getName());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/386e40d4/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java

[22/36] hbase git commit: HBASE-19082 Reject read/write from client but accept write from replication in state S

2018-05-17 Thread zhangduo
HBASE-19082 Reject read/write from client but accept write from replication in 
state S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4edb494a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4edb494a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4edb494a

Branch: refs/heads/HBASE-19064
Commit: 4edb494a8d4d2ba139d304e0b658d6ffc09917fb
Parents: 44a4cf3
Author: zhangduo 
Authored: Mon Feb 12 18:20:18 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:09:36 2018 +0800

--
 .../org/apache/hadoop/hbase/HConstants.java |   3 -
 .../src/main/protobuf/MasterProcedure.proto |   3 +-
 .../hbase/replication/ReplicationUtils.java |   4 +
 ...ransitPeerSyncReplicationStateProcedure.java |  10 +
 .../hadoop/hbase/regionserver/HRegion.java  |   5 +-
 .../hbase/regionserver/HRegionServer.java   |   2 +-
 .../hbase/regionserver/RSRpcServices.java   |  88 ++--
 .../RejectRequestsFromClientStateChecker.java   |  44 
 .../regionserver/ReplicationSink.java   |  72 ---
 .../SyncReplicationPeerInfoProvider.java|  10 +-
 .../SyncReplicationPeerInfoProviderImpl.java|  19 +-
 .../hbase/wal/SyncReplicationWALProvider.java   |   3 +
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   4 +-
 .../hbase/replication/TestSyncReplication.java  | 200 +++
 .../wal/TestSyncReplicationWALProvider.java |   8 +-
 15 files changed, 401 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4edb494a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 522c2cf..9241682 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1355,9 +1355,6 @@ public final class HConstants {
 
   public static final String NOT_IMPLEMENTED = "Not implemented";
 
-  // TODO: need to find a better place to hold it.
-  public static final String SYNC_REPLICATION_ENABLED = 
"hbase.replication.sync.enabled";
-
   private HConstants() {
 // Can't be instantiated with this ctor.
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4edb494a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 67c1b43..e8b940e 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -397,7 +397,8 @@ enum PeerSyncReplicationStateTransitionState {
   REOPEN_ALL_REGIONS_IN_PEER = 5;
   TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 6;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 7;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 8;
+  CREATE_DIR_FOR_REMOTE_WAL = 8;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 9;
 }
 
 message PeerModificationStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4edb494a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index e4dea83..d94cb00 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -37,6 +37,10 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
+  public static final String SYNC_REPLICATION_ENABLED = 
"hbase.replication.sync.enabled";
+
+  public static final String REPLICATION_ATTR_NAME = "__rep__";
+
   private ReplicationUtils() {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4edb494a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 8fc932f..69404a0 100644
--- 

[09/36] hbase git commit: HBASE-20577 Make Log Level page design consistent with the design of other pages in UI

2018-05-17 Thread zhangduo
HBASE-20577 Make Log Level page design consistent with the design of other 
pages in UI

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/60bdaf78
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/60bdaf78
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/60bdaf78

Branch: refs/heads/HBASE-19064
Commit: 60bdaf7846446acb0c4b8208d02687452fdbd2b2
Parents: acbc3a2
Author: Nihal Jain 
Authored: Thu May 17 02:24:09 2018 +0530
Committer: tedyu 
Committed: Wed May 16 21:21:36 2018 -0700

--
 .../apache/hadoop/hbase/http/log/LogLevel.java  | 48 ++--
 1 file changed, 33 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/60bdaf78/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java
--
diff --git 
a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java 
b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java
index 2f62313..aa223f3 100644
--- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java
+++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java
@@ -105,13 +105,17 @@ public final class LogLevel {
   response)) {
 return;
   }
+  response.setContentType("text/html");
+  String requestedURL = "header.jsp?pageTitle=Log Level";
+  request.getRequestDispatcher(requestedURL).include(request, response);
+  PrintWriter out = response.getWriter();
+  out.println(FORMS);
 
-  PrintWriter out = ServletUtil.initHTML(response, "Log Level");
   String logName = ServletUtil.getParameter(request, "log");
   String level = ServletUtil.getParameter(request, "level");
 
   if (logName != null) {
-out.println("Results");
+out.println("Results:");
 out.println(MARKER
 + "Submitted Log Name: " + logName + "");
 
@@ -132,28 +136,42 @@ public final class LogLevel {
   out.println("Sorry, " + log.getClass() + " not supported.");
 }
   }
-
-  out.println(FORMS);
-  out.println(ServletUtil.HTML_TAIL);
+  out.println("");
+  request.getRequestDispatcher("footer.jsp").include(request, response);
+  out.close();
 }
 
-static final String FORMS = "\nGet / Set"
-+ "\nLog:  "
-+ ""
-+ ""
-+ "\nLog:  "
-+ "Level:  "
-+ ""
-+ "";
+static final String FORMS = "\n"
++ "\n" + "\n"
++ "Get/Set Log Level\n" + "\n" + "\n" + 
"Actions:" + ""
++ "\n" + "\n" + "\n"
++ "\n" + "\n"
++ "\n" + "\n" + "\n"
++ "\n" + "\n" + ""
++ "Get the current log level for the specified log name." + "\n" 
+ "\n"
++ "\n" + "\n" + "\n" + "\n"
++ "\n" + "\n"
++ "\n"
++ "\n"
++ "\n" + "\n" + ""
++ "Set the specified log level for the specified log name." + 
"\n" + "\n"
++ "\n" + "\n" + "\n" + "\n" + "\n";
 
 private static void process(org.apache.log4j.Logger log, String level,
 PrintWriter out) throws IOException {
   if (level != null) {
 if (!level.equals(org.apache.log4j.Level.toLevel(level).toString())) {
-  out.println(MARKER + "Bad level : " + level + "");
+  out.println(MARKER + "" + "Bad level : 
" + level
+  + "" + "");
 } else {
   log.setLevel(org.apache.log4j.Level.toLevel(level));
-  out.println(MARKER + "Setting Level to " + level + " ...");
+  out.println(MARKER + "" + "Setting Level 
to " + level
+  + " ..." + "");
 }
   }
   out.println(MARKER



[05/36] hbase git commit: HBASE-20567 Pass both old and new descriptors to pre/post hooks of modify operations for table and namespace.

2018-05-17 Thread zhangduo
HBASE-20567 Pass both old and new descriptors to pre/post hooks of modify 
operations for table and namespace.

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8c9825a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8c9825a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8c9825a0

Branch: refs/heads/HBASE-19064
Commit: 8c9825a030ace256343dc6669b6edec22e6f75fd
Parents: 2c32272
Author: Apekshit Sharma 
Authored: Thu May 10 20:34:14 2018 -0700
Committer: Mike Drob 
Committed: Wed May 16 14:03:18 2018 -0500

--
 .../hbase/coprocessor/MasterObserver.java   | 122 +--
 .../org/apache/hadoop/hbase/master/HMaster.java |  29 +++--
 .../hbase/master/MasterCoprocessorHost.java |  36 +++---
 .../master/procedure/ModifyTableProcedure.java  |   6 +-
 4 files changed, 151 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8c9825a0/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index a17bc9f..a37f21a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -209,20 +209,67 @@ public interface MasterObserver {
* table RPC call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
-   * @param htd the TableDescriptor
+   * @param newDescriptor after modify operation, table will have this 
descriptor
+   * @deprecated Since 2.1. Will be removed in 3.0.
*/
+  @Deprecated
   default void preModifyTable(final 
ObserverContext ctx,
-  final TableName tableName, TableDescriptor htd) throws IOException {}
+final TableName tableName, TableDescriptor newDescriptor) throws 
IOException {}
+
+  /**
+   * Called prior to modifying a table's properties.  Called as part of modify
+   * table RPC call.
+   * @param ctx the environment to interact with the framework and master
+   * @param tableName the name of the table
+   * @param currentDescriptor current TableDescriptor of the table
+   * @param newDescriptor after modify operation, table will have this 
descriptor
+   */
+  default void preModifyTable(final 
ObserverContext ctx,
+  final TableName tableName, TableDescriptor currentDescriptor, 
TableDescriptor newDescriptor)
+throws IOException {
+preModifyTable(ctx, tableName, newDescriptor);
+  }
 
   /**
* Called after the modifyTable operation has been requested.  Called as part
* of modify table RPC call.
* @param ctx the environment to interact with the framework and master
* @param tableName the name of the table
-   * @param htd the TableDescriptor
+   * @param currentDescriptor current TableDescriptor of the table
+   * @deprecated Since 2.1. Will be removed in 3.0.
*/
+  @Deprecated
   default void postModifyTable(final 
ObserverContext ctx,
-  final TableName tableName, TableDescriptor htd) throws IOException {}
+final TableName tableName, TableDescriptor currentDescriptor) throws 
IOException {}
+
+  /**
+   * Called after the modifyTable operation has been requested.  Called as part
+   * of modify table RPC call.
+   * @param ctx the environment to interact with the framework and master
+   * @param tableName the name of the table
+   * @param oldDescriptor descriptor of table before modify operation happened
+   * @param currentDescriptor current TableDescriptor of the table
+   */
+  default void postModifyTable(final 
ObserverContext ctx,
+  final TableName tableName, TableDescriptor oldDescriptor, 
TableDescriptor currentDescriptor)
+throws IOException {
+postModifyTable(ctx, tableName, currentDescriptor);
+  }
+
+  /**
+   * Called prior to modifying a table's properties.  Called as part of modify
+   * table procedure and it is async to the modify table RPC call.
+   *
+   * @param ctx the environment to interact with the framework and master
+   * @param tableName the name of the table
+   * @param newDescriptor after modify operation, table will have this 
descriptor
+   * @deprecated Since 2.1. Will be removed in 3.0.
+   */
+  @Deprecated
+  default void preModifyTableAction(
+final ObserverContext ctx,
+final TableName tableName,
+final TableDescriptor newDescriptor) throws IOException {}
 
   /**
* Called prior to modifying a table's properties.  Called as part of modify
@@ -230,12 

[25/36] hbase git commit: HBASE-20370 Also remove the wal file in remote cluster when we finish replicating a file

2018-05-17 Thread zhangduo
HBASE-20370 Also remove the wal file in remote cluster when we finish 
replicating a file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/87d05d63
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/87d05d63
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/87d05d63

Branch: refs/heads/HBASE-19064
Commit: 87d05d632f47903a89ae0b587c0180f240bc464b
Parents: 02b22df
Author: zhangduo 
Authored: Tue Apr 17 09:04:56 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:39 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java |  36 ++-
 .../regionserver/ReplicationSource.java |  38 +++
 .../ReplicationSourceInterface.java |  21 +++-
 .../regionserver/ReplicationSourceManager.java  | 108 ++-
 .../regionserver/ReplicationSourceShipper.java  |  27 ++---
 .../hbase/wal/SyncReplicationWALProvider.java   |  11 +-
 .../replication/ReplicationSourceDummy.java |  20 ++--
 .../TestReplicationSourceManager.java   | 101 -
 8 files changed, 246 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/87d05d63/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index cb22f57..66e9b01 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -22,14 +22,17 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class for replication.
@@ -37,6 +40,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
+  private static final Logger LOG = 
LoggerFactory.getLogger(ReplicationUtils.class);
+
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
   public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";
@@ -176,4 +181,33 @@ public final class ReplicationUtils {
   return tableCFs != null && tableCFs.containsKey(tableName);
 }
   }
+
+  public static FileSystem getRemoteWALFileSystem(Configuration conf, String 
remoteWALDir)
+  throws IOException {
+return new Path(remoteWALDir).getFileSystem(conf);
+  }
+
+  public static Path getRemoteWALDirForPeer(String remoteWALDir, String 
peerId) {
+return new Path(remoteWALDir, peerId);
+  }
+
+  /**
+   * Do the sleeping logic
+   * @param msg Why we sleep
+   * @param sleepForRetries the base sleep time.
+   * @param sleepMultiplier by how many times the default sleeping time is 
augmented
+   * @param maxRetriesMultiplier the max retry multiplier
+   * @return True if sleepMultiplier is  
maxRetriesMultiplier
+   */
+  public static boolean sleepForRetries(String msg, long sleepForRetries, int 
sleepMultiplier,
+  int maxRetriesMultiplier) {
+try {
+  LOG.trace("{}, sleeping {} times {}", msg, sleepForRetries, 
sleepMultiplier);
+  Thread.sleep(sleepForRetries * sleepMultiplier);
+} catch (InterruptedException e) {
+  LOG.debug("Interrupted while sleeping between retries");
+  Thread.currentThread().interrupt();
+}
+return sleepMultiplier < maxRetriesMultiplier;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/87d05d63/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index b05a673..01ccb11 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -89,8 +89,6 @@ public class ReplicationSource implements 

[36/36] hbase git commit: HBASE-20576 Check remote WAL directory when creating peer and transiting peer to A

2018-05-17 Thread zhangduo
HBASE-20576 Check remote WAL directory when creating peer and transiting peer 
to A


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bcee6f8f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bcee6f8f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bcee6f8f

Branch: refs/heads/HBASE-19064
Commit: bcee6f8fc2df8621047be38b7047a1eef9498d63
Parents: 1497b36
Author: zhangduo 
Authored: Tue May 15 15:07:40 2018 +0800
Committer: zhangduo 
Committed: Thu May 17 17:26:41 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 19 +++--
 ...ransitPeerSyncReplicationStateProcedure.java | 73 +---
 .../replication/TestReplicationAdmin.java   | 57 ---
 3 files changed, 110 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bcee6f8f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index e1d8b51..8e49137 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -31,6 +32,7 @@ import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -45,7 +47,6 @@ import 
org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -193,9 +194,9 @@ public class ReplicationPeerManager {
   }
 
   /**
-   * @return the old state, and whether the peer is enabled.
+   * @return the old description of the peer
*/
-  Pair 
preTransitPeerSyncReplicationState(String peerId,
+  ReplicationPeerDescription preTransitPeerSyncReplicationState(String peerId,
   SyncReplicationState state) throws DoNotRetryIOException {
 ReplicationPeerDescription desc = checkPeerExists(peerId);
 SyncReplicationState fromState = desc.getSyncReplicationState();
@@ -204,7 +205,7 @@ public class ReplicationPeerManager {
   throw new DoNotRetryIOException("Can not transit current cluster state 
from " + fromState +
 " to " + state + " for peer id=" + peerId);
 }
-return Pair.newPair(fromState, desc.isEnabled());
+return desc;
   }
 
   public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean 
enabled)
@@ -384,6 +385,16 @@ public class ReplicationPeerManager {
   "Only support replicated table config for sync replication peer");
   }
 }
+Path remoteWALDir = new Path(peerConfig.getRemoteWALDir());
+if (!remoteWALDir.isAbsolute()) {
+  throw new DoNotRetryIOException(
+"The remote WAL directory " + peerConfig.getRemoteWALDir() + " is not 
absolute");
+}
+URI remoteWALDirUri = remoteWALDir.toUri();
+if (remoteWALDirUri.getScheme() == null || remoteWALDirUri.getAuthority() 
== null) {
+  throw new DoNotRetryIOException("The remote WAL directory " + 
peerConfig.getRemoteWALDir() +
+" is not qualified, you must provide scheme and authority");
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/bcee6f8f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 0175296..ebe7a93 100644
---