hbase git commit: HBASE-18467

2017-08-23 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-18467 9a0735bd1 -> 16b345fd8


HBASE-18467


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/16b345fd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/16b345fd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/16b345fd

Branch: refs/heads/HBASE-18467
Commit: 16b345fd81b99ee462a821335cc8f77e4272049f
Parents: 9a0735b
Author: Sean Busbey 
Authored: Wed Aug 23 02:17:24 2017 -0500
Committer: Sean Busbey 
Committed: Wed Aug 23 02:17:24 2017 -0500

--
 dev-support/Jenkinsfile | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/16b345fd/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 90ad072..1df8232 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -395,9 +395,7 @@ END
echo "[INFO] There are ${currentBuild.changeSets.size()} change 
sets."
def seenJiras = []
CharSequence pattern = /HBASE-[0-9]+/
-def foobar(CharSequence dee, CharSequence catz) {
-  return org.codehaus.groovy.runtime.StringGroovyMethods.find(dee, catz)
-}
+   def foobar = org.codehaus.groovy.runtime.StringGroovyMethods.
for ( changelist in currentBuild.changeSets ) {
  if ( changelist.isEmptySet() ) {
echo "[DEBUG] change set was empty, skipping JIRA comments."



hbase git commit: HBASE-18467 indirect.

2017-08-23 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-18467 447774a13 -> 9a0735bd1


HBASE-18467 indirect.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a0735bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a0735bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a0735bd

Branch: refs/heads/HBASE-18467
Commit: 9a0735bd1b8722121f82e775201abf96159005fb
Parents: 447774a
Author: Sean Busbey 
Authored: Wed Aug 23 01:52:35 2017 -0500
Committer: Sean Busbey 
Committed: Wed Aug 23 01:52:35 2017 -0500

--
 dev-support/Jenkinsfile | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a0735bd/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index b878ded..90ad072 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -395,6 +395,9 @@ END
echo "[INFO] There are ${currentBuild.changeSets.size()} change 
sets."
def seenJiras = []
CharSequence pattern = /HBASE-[0-9]+/
+def foobar(CharSequence dee, CharSequence catz) {
+  return org.codehaus.groovy.runtime.StringGroovyMethods.find(dee, catz)
+}
for ( changelist in currentBuild.changeSets ) {
  if ( changelist.isEmptySet() ) {
echo "[DEBUG] change set was empty, skipping JIRA comments."
@@ -410,7 +413,7 @@ END
echo " ${change.author}"
echo ""
// For now, only match the first occurrance of an HBase jira 
id, due to JENKINS-46358
-   currentIssue = 
org.codehaus.groovy.runtime.StringGroovyMethods.find(msg, pattern)
+   currentIssue = foobar(msg, pattern)
if (currentIssue != null ) {
  echo "[DEBUG] found jira key: ${currentIssue}"
  if ( currentIssue in seenJiras ) {



hbase git commit: HBASE-18467

2017-08-23 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-18467 16b345fd8 -> 991349739


HBASE-18467


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/99134973
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/99134973
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/99134973

Branch: refs/heads/HBASE-18467
Commit: 9913497395a099bffdb3ae25f422744d8be859e5
Parents: 16b345f
Author: Sean Busbey 
Authored: Wed Aug 23 02:30:15 2017 -0500
Committer: Sean Busbey 
Committed: Wed Aug 23 02:30:15 2017 -0500

--
 dev-support/Jenkinsfile | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/99134973/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 1df8232..27850a4 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -395,7 +395,9 @@ END
echo "[INFO] There are ${currentBuild.changeSets.size()} change 
sets."
def seenJiras = []
CharSequence pattern = /HBASE-[0-9]+/
-   def foobar = org.codehaus.groovy.runtime.StringGroovyMethods.
+   def foobar = { CharSequence foo, CharSequence bar ->
+ org.codehaus.groovy.runtime.StringGroovyMethods.find(foo,bar)
+   }
for ( changelist in currentBuild.changeSets ) {
  if ( changelist.isEmptySet() ) {
echo "[DEBUG] change set was empty, skipping JIRA comments."



hbase git commit: HBASE-18614 Setting BUCKET_CACHE_COMBINED_KEY to false disables stats on RS UI

2017-08-23 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 338232f81 -> ad22437d0


HBASE-18614 Setting BUCKET_CACHE_COMBINED_KEY to false disables stats on RS UI

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ad22437d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ad22437d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ad22437d

Branch: refs/heads/branch-1
Commit: ad22437d0570b985861b2e2abddec8c4fc834a64
Parents: 338232f
Author: Biju Nair 
Authored: Wed Aug 23 13:10:04 2017 -0400
Committer: tedyu 
Committed: Wed Aug 23 10:51:27 2017 -0700

--
 .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 4 
 .../apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java| 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ad22437d/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index f427e04..334e848 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.util.StringUtils;
+import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -1152,7 +1153,10 @@ public class LruBlockCache implements 
ResizableBlockCache, HeapSize {
   }
 
   @Override
+  @JsonIgnore
   public BlockCache[] getBlockCaches() {
+if (victimHandler != null)
+  return new BlockCache[] {this, this.victimHandler};
 return null;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ad22437d/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
index 3b9161c..7ce9679 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
@@ -118,7 +118,7 @@ public class TestBlockCacheReporting {
 BlockCache [] bcs = bc.getBlockCaches();
 if (bcs != null) {
   for (BlockCache sbc: bc.getBlockCaches()) {
-bucketCacheReport(sbc);
+LOG.info(bc.getClass().getSimpleName() + ": " + sbc.getStats());
   }
 }
   }
@@ -156,4 +156,4 @@ public class TestBlockCacheReporting {
 }
 return cbsbf;
   }
-}
\ No newline at end of file
+}



hbase git commit: HBASE-18575 [AMv2] Fixed and enabled TestRestartCluster#testRetainAssignmentOnRestart on master

2017-08-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 37c659462 -> fad968d99


HBASE-18575 [AMv2] Fixed and enabled 
TestRestartCluster#testRetainAssignmentOnRestart on master

* Fixed ServerCrashProcedure to set forceNewPlan to false for instances 
AssignProcedure. This enables balancer to find most suitable target server
* Fixed and enabled TestRestartCluster#testRetainAssignmentOnRestart on master
* Renamed method ServerName@isSameHostnameAndPort() to isSameAddress()

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fad968d9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fad968d9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fad968d9

Branch: refs/heads/branch-2
Commit: fad968d99f4693278edcaefff92738e67ad8e8a0
Parents: 37c6594
Author: Umesh Agashe 
Authored: Tue Aug 22 16:23:21 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 23 10:11:54 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/ServerName.java |  4 ++--
 .../apache/hadoop/hbase/DistributedHBaseCluster.java  |  4 ++--
 .../master/balancer/TestRSGroupBasedLoadBalancer.java |  2 +-
 .../hbase/favored/FavoredNodeAssignmentHelper.java|  3 +--
 .../hadoop/hbase/favored/FavoredNodeLoadBalancer.java |  2 +-
 .../apache/hadoop/hbase/favored/FavoredNodesPlan.java |  2 +-
 .../hadoop/hbase/master/ActiveMasterManager.java  |  2 +-
 .../org/apache/hadoop/hbase/master/DeadServer.java|  4 ++--
 .../org/apache/hadoop/hbase/master/ServerManager.java |  3 +--
 .../master/balancer/FavoredStochasticBalancer.java|  4 ++--
 .../hbase/master/procedure/ServerCrashProcedure.java  |  4 +++-
 .../hadoop/hbase/regionserver/HRegionServer.java  |  3 +--
 .../java/org/apache/hadoop/hbase/TestServerName.java  |  2 +-
 .../favored/TestStartcodeAgnosticServerName.java  |  4 ++--
 .../hadoop/hbase/master/TestRegionPlacement.java  |  8 
 .../hadoop/hbase/master/TestRegionPlacement2.java | 14 +++---
 .../hadoop/hbase/master/TestRestartCluster.java   |  7 ---
 .../TestFavoredStochasticBalancerPickers.java |  4 ++--
 .../balancer/TestFavoredStochasticLoadBalancer.java   |  7 +++
 19 files changed, 41 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fad968d9/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
index 88abc3f..5f83782 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
@@ -347,8 +347,8 @@ public class ServerName implements Comparable, 
Serializable {
* @param right
* @return True if other has same hostname and port.
*/
-  public static boolean isSameHostnameAndPort(final ServerName left,
-  final ServerName right) {
+  public static boolean isSameAddress(final ServerName left,
+  final ServerName right) {
 // TODO: Make this left.getAddress().equals(right.getAddress())
 if (left == null) return false;
 if (right == null) return false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/fad968d9/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index 431ba42..55c6e28 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -320,7 +320,7 @@ public class DistributedHBaseCluster extends HBaseCluster {
 List deferred = new ArrayList<>();
 //check whether current master has changed
 final ServerName initMaster = initial.getMaster();
-if (!ServerName.isSameHostnameAndPort(initMaster, current.getMaster())) {
+if (!ServerName.isSameAddress(initMaster, current.getMaster())) {
   LOG.info("Restoring cluster - Initial active master : "
   + initMaster.getHostAndPort()
   + " has changed to : "
@@ -340,7 +340,7 @@ public class DistributedHBaseCluster extends HBaseCluster {
 // 2. Stop current master
 // 3. Start backup masters
 for (ServerName currentBackup : current.getBackupMasters()) {
-  if (!ServerName.isSameHostnameAndPort(currentBackup, initMaster)) {
+  if (!ServerName.isSameAddress(currentBackup, initMaster)) {

hbase git commit: HBASE-18594 Release hbase-2.0.0-alpha2; ADDENDUM update version from 2.0.0-alpha2 to 2.0.0-alpha3-SNAPSHOT

2017-08-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 fad968d99 -> b24e33312


HBASE-18594 Release hbase-2.0.0-alpha2; ADDENDUM update version from 
2.0.0-alpha2 to 2.0.0-alpha3-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b24e3331
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b24e3331
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b24e3331

Branch: refs/heads/branch-2
Commit: b24e33312a6478a6a3063a7dd69637c7bd9025a0
Parents: fad968d
Author: Michael Stack 
Authored: Wed Aug 23 11:07:41 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 23 11:07:41 2017 -0700

--
 hbase-annotations/pom.xml| 2 +-
 hbase-archetypes/hbase-archetype-builder/pom.xml | 2 +-
 hbase-archetypes/hbase-client-project/pom.xml| 2 +-
 hbase-archetypes/hbase-shaded-client-project/pom.xml | 2 +-
 hbase-archetypes/pom.xml | 2 +-
 hbase-assembly/pom.xml   | 2 +-
 hbase-backup/pom.xml | 2 +-
 hbase-checkstyle/pom.xml | 4 ++--
 hbase-client/pom.xml | 2 +-
 hbase-common/pom.xml | 2 +-
 hbase-endpoint/pom.xml   | 2 +-
 hbase-examples/pom.xml   | 2 +-
 hbase-external-blockcache/pom.xml| 2 +-
 hbase-hadoop-compat/pom.xml  | 2 +-
 hbase-hadoop2-compat/pom.xml | 2 +-
 hbase-it/pom.xml | 2 +-
 hbase-metrics-api/pom.xml| 2 +-
 hbase-metrics/pom.xml| 2 +-
 hbase-prefix-tree/pom.xml| 2 +-
 hbase-procedure/pom.xml  | 2 +-
 hbase-protocol-shaded/pom.xml| 2 +-
 hbase-protocol/pom.xml   | 2 +-
 hbase-resource-bundle/pom.xml| 2 +-
 hbase-rest/pom.xml   | 2 +-
 hbase-rsgroup/pom.xml| 2 +-
 hbase-server/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-server/pom.xml | 2 +-
 hbase-shaded/pom.xml | 2 +-
 hbase-shell/pom.xml  | 2 +-
 hbase-spark-it/pom.xml   | 2 +-
 hbase-spark/pom.xml  | 2 +-
 hbase-testing-util/pom.xml   | 2 +-
 hbase-thrift/pom.xml | 2 +-
 pom.xml  | 2 +-
 35 files changed, 36 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b24e3331/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index 03f5335..6576147 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-2.0.0-alpha2
+2.0.0-alpha3-SNAPSHOT
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b24e3331/hbase-archetypes/hbase-archetype-builder/pom.xml
--
diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml 
b/hbase-archetypes/hbase-archetype-builder/pom.xml
index be85b18..37fb8ce 100644
--- a/hbase-archetypes/hbase-archetype-builder/pom.xml
+++ b/hbase-archetypes/hbase-archetype-builder/pom.xml
@@ -25,7 +25,7 @@
   
 hbase-archetypes
 org.apache.hbase
-2.0.0-alpha2
+2.0.0-alpha3-SNAPSHOT
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b24e3331/hbase-archetypes/hbase-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-client-project/pom.xml 
b/hbase-archetypes/hbase-client-project/pom.xml
index 2eaa93e..2a4fa8a 100644
--- a/hbase-archetypes/hbase-client-project/pom.xml
+++ b/hbase-archetypes/hbase-client-project/pom.xml
@@ -26,7 +26,7 @@
   
 hbase-archetypes
 org.apache.hbase
-2.0.0-alpha2
+2.0.0-alpha3-SNAPSHOT
 ..
   
   hbase-client-project

http://git-wip-us.apache.org/repos/asf/hbase/blob/b24e3331/hbase-archetypes/hbase-shaded-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/pom.xml 
b/hbase-archetypes/hbase-shaded-client-project/pom.xml
index f7a6016..ebfb235a 100644
--- 

[28/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
--
diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
new file mode 100644
index 000..7011ed3
--- /dev/null
+++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -0,0 +1,336 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
+  private static final Log LOG = 
LogFactory.getLog(TestIncrementalBackupMergeWithFailures.class);
+
+  static enum FailurePhase {
+PHASE1, PHASE2, PHASE3, PHASE4
+  }
+  public final static String FAILURE_PHASE_KEY = "failurePhase";
+
+  static class BackupMergeJobWithFailures extends MapReduceBackupMergeJob {
+
+FailurePhase failurePhase;
+
+@Override
+public void setConf(Configuration conf) {
+  super.setConf(conf);
+  String val = conf.get(FAILURE_PHASE_KEY);
+  if (val != null) {
+failurePhase = FailurePhase.valueOf(val);
+  } else {
+Assert.fail("Failure phase is not set");
+  }
+}
+
+
+/**
+ * This is the exact copy of parent's run() with injections
+ * of different types of failures
+ */
+@Override
+public void run(String[] backupIds) throws IOException {
+  String bulkOutputConfKey;
+
+  // TODO : run player on remote cluster
+  player = new MapReduceHFileSplitterJob();
+  bulkOutputConfKey = MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY;
+  // Player reads all files in arbitrary directory structure and creates
+  // a Map task for each file
+  String bids = StringUtils.join(backupIds, ",");
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Merge backup images " + bids);
+  }
+
+  List> processedTableList = new 
ArrayList>();
+  boolean finishedTables = false;
+  Connection conn = ConnectionFactory.createConnection(getConf());
+  BackupSystemTable table = new BackupSystemTable(conn);
+  FileSystem fs = FileSystem.get(getConf());
+
+  try {
+
+// Start backup exclusive operation
+table.startBackupExclusiveOperation();
+// Start merge operation
+table.startMergeOperation(backupIds);
+
+// Select most recent backup id
+String mergedBackupId = findMostRecentBackupId(backupIds);
+
+TableName[] tableNames = 

[34/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
new file mode 100644
index 000..650ba2e
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -0,0 +1,1022 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupAdmin;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.util.BackupSet;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * General backup commands, options and usage messages
+ */
+
+@InterfaceAudience.Private
+public final class BackupCommands {
+
+  public final static String INCORRECT_USAGE = "Incorrect usage";
+
+  public static final String USAGE = "Usage: hbase backup COMMAND 
[command-specific arguments]\n"
+  + "where COMMAND is one of:\n" + "  create create a new backup 
image\n"
+  + "  delete delete an existing backup image\n"
+  + "  describe   show the detailed information of a backup image\n"
+  + "  historyshow history of all successful backups\n"
+  + "  progress   show the progress of the latest backup request\n"
+  + "  setbackup set management\n"
+  + "  repair repair backup system table\n"
+  + "  merge  merge backup images\n"
+  + "Run \'hbase backup COMMAND -h\' to see help message for each 
command\n";
+
+  public static final String CREATE_CMD_USAGE =
+  "Usage: hbase 

[23/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
deleted file mode 100644
index 6330899..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ /dev/null
@@ -1,387 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem;
-import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-
-/**
- * After a full backup was created, the incremental backup will only store the 
changes made after
- * the last full or incremental backup. Creating the backup copies the 
logfiles in .logs and
- * .oldlogs since the last backup timestamp.
- */
-@InterfaceAudience.Private
-public class IncrementalBackupManager extends BackupManager {
-  public static final Log LOG = 
LogFactory.getLog(IncrementalBackupManager.class);
-
-  public IncrementalBackupManager(Connection conn, Configuration conf) throws 
IOException {
-super(conn, conf);
-  }
-
-  /**
-   * Obtain the list of logs that need to be copied out for this incremental 
backup. The list is set
-   * in BackupInfo.
-   * @return The new HashMap of RS log time stamps after the log roll for this 
incremental backup.
-   * @throws IOException exception
-   */
-  public HashMap getIncrBackupLogFileMap()
-  throws IOException {
-List logList;
-HashMap newTimestamps;
-HashMap previousTimestampMins;
-
-String savedStartCode = readBackupStartCode();
-
-// key: tableName
-// value: 
-HashMap> previousTimestampMap = 
readLogTimestampMap();
-
-previousTimestampMins = 
BackupUtils.getRSLogTimestampMins(previousTimestampMap);
-
-if (LOG.isDebugEnabled()) {
-  LOG.debug("StartCode " + savedStartCode + "for backupID " + 
backupInfo.getBackupId());
-}
-// get all new log files from .logs and .oldlogs after last TS and before 
new timestamp
-if (savedStartCode == null || previousTimestampMins == null
-|| previousTimestampMins.isEmpty()) {
-  throw new IOException(
-  "Cannot read any previous back up timestamps from backup system 
table. "
-  + "In order to create an incremental backup, at least one full 
backup is needed.");
-}
-
-LOG.info("Execute roll log procedure for incremental backup ...");
-HashMap props = new HashMap();
-props.put("backupRoot", backupInfo.getBackupRootDir());
-
-try (Admin admin = conn.getAdmin();) {
-
-  
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
-LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
-
-}
-newTimestamps = readRegionServerLastLogRollResult();
-
-logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, 
conf, savedStartCode);
-  

[33/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
new file mode 100644
index 000..4dab046
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -0,0 +1,2051 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
+
+/**
+ * This class provides API to access backup system table
+ *
+ * Backup system table schema:
+ * 
+ * 1. Backup sessions rowkey= "session:"+backupId; value =serialized 
BackupInfo
+ * 2. Backup start code rowkey = "startcode:"+backupRoot; value = 
startcode
+ * 3. Incremental backup set rowkey="incrbackupset:"+backupRoot; 
value=[list of tables]
+ * 4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name;
+ * value = map[RS-> last WAL timestamp]
+ * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last 
WAL timestamp
+ * 6. WALs recorded rowkey="wals:"+WAL unique file name;
+ * value = backupId and full WAL file name
+ * 
+ */
+
+@InterfaceAudience.Private
+public final class BackupSystemTable implements Closeable {
+  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
+
+  static class WALItem {
+String backupId;
+String walFile;
+String backupRoot;
+
+WALItem(String backupId, String walFile, String backupRoot) {
+  this.backupId = backupId;
+  this.walFile = walFile;
+  this.backupRoot = backupRoot;
+}
+
+public String getBackupId() {
+  return backupId;
+}
+
+public String getWalFile() {
+  return walFile;
+}
+
+public String getBackupRoot() {
+  return backupRoot;
+}
+
+@Override
+public String toString() {
+  return Path.SEPARATOR + backupRoot + Path.SEPARATOR + backupId + 

[18/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2dda3712
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2dda3712
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2dda3712

Branch: refs/heads/master
Commit: 2dda371209b2e810fa76034b8fa8dcf47140e265
Parents: 6c0e219
Author: Vladimir Rodionov 
Authored: Tue Aug 22 17:14:48 2017 -0700
Committer: Josh Elser 
Committed: Wed Aug 23 12:40:57 2017 -0400

--
 hbase-assembly/pom.xml  |6 +-
 .../src/main/assembly/hadoop-two-compat.xml |1 +
 hbase-assembly/src/main/assembly/src.xml|1 +
 hbase-backup/.DS_Store  |  Bin 0 -> 6148 bytes
 hbase-backup/pom.xml|  265 +++
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  136 ++
 .../hbase/backup/BackupClientFactory.java   |   53 +
 .../hadoop/hbase/backup/BackupCopyJob.java  |   55 +
 .../hadoop/hbase/backup/BackupDriver.java   |  210 ++
 .../hadoop/hbase/backup/BackupHFileCleaner.java |  180 ++
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  550 +
 .../hadoop/hbase/backup/BackupMergeJob.java |   40 +
 .../hadoop/hbase/backup/BackupObserver.java |  102 +
 .../hadoop/hbase/backup/BackupRequest.java  |  139 ++
 .../hbase/backup/BackupRestoreConstants.java|  123 ++
 .../hbase/backup/BackupRestoreFactory.java  |   82 +
 .../hadoop/hbase/backup/BackupTableInfo.java|   82 +
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  146 ++
 .../apache/hadoop/hbase/backup/LogUtils.java|   50 +
 .../hadoop/hbase/backup/RestoreDriver.java  |  265 +++
 .../apache/hadoop/hbase/backup/RestoreJob.java  |   46 +
 .../hadoop/hbase/backup/RestoreRequest.java |  135 ++
 .../hbase/backup/impl/BackupAdminImpl.java  |  743 +++
 .../hbase/backup/impl/BackupCommands.java   | 1022 +
 .../hbase/backup/impl/BackupException.java  |   84 +
 .../hadoop/hbase/backup/impl/BackupManager.java |  502 +
 .../hbase/backup/impl/BackupManifest.java   |  674 ++
 .../hbase/backup/impl/BackupSystemTable.java| 2051 ++
 .../backup/impl/FullTableBackupClient.java  |  224 ++
 .../backup/impl/IncrementalBackupManager.java   |  387 
 .../impl/IncrementalTableBackupClient.java  |  377 
 .../hbase/backup/impl/RestoreTablesClient.java  |  278 +++
 .../hbase/backup/impl/TableBackupClient.java|  436 
 .../mapreduce/MapReduceBackupCopyJob.java   |  344 +++
 .../mapreduce/MapReduceBackupMergeJob.java  |  321 +++
 .../mapreduce/MapReduceHFileSplitterJob.java|  181 ++
 .../backup/mapreduce/MapReduceRestoreJob.java   |  136 ++
 .../hbase/backup/master/BackupLogCleaner.java   |  142 ++
 .../master/LogRollMasterProcedureManager.java   |  155 ++
 .../regionserver/LogRollBackupSubprocedure.java |  168 ++
 .../LogRollBackupSubprocedurePool.java  |  139 ++
 .../LogRollRegionServerProcedureManager.java|  185 ++
 .../hadoop/hbase/backup/util/BackupSet.java |   58 +
 .../hadoop/hbase/backup/util/BackupUtils.java   |  747 +++
 .../hadoop/hbase/backup/util/RestoreTool.java   |  516 +
 .../hadoop/hbase/backup/TestBackupBase.java |  503 +
 .../hbase/backup/TestBackupBoundaryTests.java   |   97 +
 .../hbase/backup/TestBackupCommandLineTool.java |  431 
 .../hadoop/hbase/backup/TestBackupDelete.java   |  102 +
 .../hbase/backup/TestBackupDeleteRestore.java   |   70 +
 .../backup/TestBackupDeleteWithFailures.java|  194 ++
 .../hadoop/hbase/backup/TestBackupDescribe.java |  110 +
 .../hbase/backup/TestBackupHFileCleaner.java|  141 ++
 .../hbase/backup/TestBackupMultipleDeletes.java |  158 ++
 .../hadoop/hbase/backup/TestBackupRepair.java   |   91 +
 .../hbase/backup/TestBackupShowHistory.java |  148 ++
 .../hbase/backup/TestBackupStatusProgress.java  |   96 +
 .../hbase/backup/TestBackupSystemTable.java |  511 +
 .../hadoop/hbase/backup/TestFullBackup.java |   59 +
 .../hadoop/hbase/backup/TestFullBackupSet.java  |  103 +
 .../backup/TestFullBackupSetRestoreSet.java |  128 ++
 .../backup/TestFullBackupWithFailures.java  |   79 +
 .../hadoop/hbase/backup/TestFullRestore.java|  345 +++
 .../hbase/backup/TestIncrementalBackup.java |  209 ++
 .../TestIncrementalBackupDeleteTable.java   |  129 ++
 .../TestIncrementalBackupMergeWithFailures.java |  336 +++
 .../TestIncrementalBackupWithBulkLoad.java  |  145 ++
 .../TestIncrementalBackupWithFailures.java  |  161 ++
 .../hadoop/hbase/backup/TestRemoteBackup.java   |  135 ++
 .../hadoop/hbase/backup/TestRemoteRestore.java  |   59 +
 .../backup/TestRepairAfterFailedDelete.java |   93 +
 

[09/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
deleted file mode 100644
index 1765bf3..000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
+++ /dev/null
@@ -1,550 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder;
-import org.apache.hadoop.hbase.util.Bytes;
-
-
-/**
- * An object to encapsulate the information for each backup session
- */
-@InterfaceAudience.Private
-public class BackupInfo implements Comparable {
-  private static final Log LOG = LogFactory.getLog(BackupInfo.class);
-
-  public static interface Filter {
-
-/**
- * Filter interface
- * @param info backup info
- * @return true if info passes filter, false otherwise
- */
-public boolean apply(BackupInfo info);
-  }
-
-  /**
-   * Backup session states
-   */
-  public static enum BackupState {
-RUNNING, COMPLETE, FAILED, ANY;
-  }
-
-  /**
-   * BackupPhase - phases of an ACTIVE backup session (running), when state of 
a backup session is
-   * BackupState.RUNNING
-   */
-  public static enum BackupPhase {
-REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, 
STORE_MANIFEST;
-  }
-
-  /**
-   * Backup id
-   */
-  private String backupId;
-
-  /**
-   * Backup type, full or incremental
-   */
-  private BackupType type;
-
-  /**
-   * Target root directory for storing the backup files
-   */
-  private String backupRootDir;
-
-  /**
-   * Backup state
-   */
-  private BackupState state;
-
-  /**
-   * Backup phase
-   */
-  private BackupPhase phase = BackupPhase.REQUEST;
-
-  /**
-   * Backup failure message
-   */
-  private String failedMsg;
-
-  /**
-   * Backup status map for all tables
-   */
-  private Map backupTableInfoMap;
-
-  /**
-   * Actual start timestamp of a backup process
-   */
-  private long startTs;
-
-  /**
-   * Actual end timestamp of the backup process
-   */
-  private long completeTs;
-
-  /**
-   * Total bytes of incremental logs copied
-   */
-  private long totalBytesCopied;
-
-  /**
-   * For incremental backup, a location of a backed-up hlogs
-   */
-  private String hlogTargetDir = null;
-
-  /**
-   * Incremental backup file list
-   */
-  private List incrBackupFileList;
-
-  /**
-   * New region server log timestamps for table set after distributed log roll 
key - table name,
-   * value - map of RegionServer hostname -> last log rolled timestamp
-   */
-  private HashMap> tableSetTimestampMap;
-
-  /**
-   * Backup progress in %% (0-100)
-   */
-  private int progress;
-
-  /**
-   * Number of parallel workers. -1 - system defined
-   */
-  private int workers = -1;
-
-  /**
-   * Bandwidth per worker in MB per sec. -1 - unlimited
-   */
-  private long bandwidth = -1;
-
-  public BackupInfo() {
-backupTableInfoMap = new HashMap();
-  }
-
-  public BackupInfo(String backupId, BackupType type, TableName[] tables, 
String targetRootDir) {
-this();
-this.backupId = backupId;
-this.type = type;
-

[16/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
new file mode 100644
index 000..650ba2e
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -0,0 +1,1022 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
+import static 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupAdmin;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.util.BackupSet;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * General backup commands, options and usage messages
+ */
+
+@InterfaceAudience.Private
+public final class BackupCommands {
+
+  public final static String INCORRECT_USAGE = "Incorrect usage";
+
+  public static final String USAGE = "Usage: hbase backup COMMAND 
[command-specific arguments]\n"
+  + "where COMMAND is one of:\n" + "  create create a new backup 
image\n"
+  + "  delete delete an existing backup image\n"
+  + "  describe   show the detailed information of a backup image\n"
+  + "  historyshow history of all successful backups\n"
+  + "  progress   show the progress of the latest backup request\n"
+  + "  setbackup set management\n"
+  + "  repair repair backup system table\n"
+  + "  merge  merge backup images\n"
+  + "Run \'hbase backup COMMAND -h\' to see help message for each 
command\n";
+
+  public static final String CREATE_CMD_USAGE =
+  "Usage: hbase 

[27/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
deleted file mode 100644
index 1765bf3..000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
+++ /dev/null
@@ -1,550 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupInfo.Builder;
-import org.apache.hadoop.hbase.util.Bytes;
-
-
-/**
- * An object to encapsulate the information for each backup session
- */
-@InterfaceAudience.Private
-public class BackupInfo implements Comparable {
-  private static final Log LOG = LogFactory.getLog(BackupInfo.class);
-
-  public static interface Filter {
-
-/**
- * Filter interface
- * @param info backup info
- * @return true if info passes filter, false otherwise
- */
-public boolean apply(BackupInfo info);
-  }
-
-  /**
-   * Backup session states
-   */
-  public static enum BackupState {
-RUNNING, COMPLETE, FAILED, ANY;
-  }
-
-  /**
-   * BackupPhase - phases of an ACTIVE backup session (running), when state of 
a backup session is
-   * BackupState.RUNNING
-   */
-  public static enum BackupPhase {
-REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, 
STORE_MANIFEST;
-  }
-
-  /**
-   * Backup id
-   */
-  private String backupId;
-
-  /**
-   * Backup type, full or incremental
-   */
-  private BackupType type;
-
-  /**
-   * Target root directory for storing the backup files
-   */
-  private String backupRootDir;
-
-  /**
-   * Backup state
-   */
-  private BackupState state;
-
-  /**
-   * Backup phase
-   */
-  private BackupPhase phase = BackupPhase.REQUEST;
-
-  /**
-   * Backup failure message
-   */
-  private String failedMsg;
-
-  /**
-   * Backup status map for all tables
-   */
-  private Map backupTableInfoMap;
-
-  /**
-   * Actual start timestamp of a backup process
-   */
-  private long startTs;
-
-  /**
-   * Actual end timestamp of the backup process
-   */
-  private long completeTs;
-
-  /**
-   * Total bytes of incremental logs copied
-   */
-  private long totalBytesCopied;
-
-  /**
-   * For incremental backup, a location of a backed-up hlogs
-   */
-  private String hlogTargetDir = null;
-
-  /**
-   * Incremental backup file list
-   */
-  private List incrBackupFileList;
-
-  /**
-   * New region server log timestamps for table set after distributed log roll 
key - table name,
-   * value - map of RegionServer hostname -> last log rolled timestamp
-   */
-  private HashMap> tableSetTimestampMap;
-
-  /**
-   * Backup progress in %% (0-100)
-   */
-  private int progress;
-
-  /**
-   * Number of parallel workers. -1 - system defined
-   */
-  private int workers = -1;
-
-  /**
-   * Bandwidth per worker in MB per sec. -1 - unlimited
-   */
-  private long bandwidth = -1;
-
-  public BackupInfo() {
-backupTableInfoMap = new HashMap();
-  }
-
-  public BackupInfo(String backupId, BackupType type, TableName[] tables, 
String targetRootDir) {
-this();
-this.backupId = backupId;
-this.type = type;
-

[35/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
new file mode 100644
index 000..1c43e88
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
@@ -0,0 +1,146 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.HashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * View to an on-disk Backup Image FileSytem Provides the set of methods 
necessary to interact with
+ * the on-disk Backup Image data.
+ */
+@InterfaceAudience.Private
+public class HBackupFileSystem {
+  public static final Log LOG = LogFactory.getLog(HBackupFileSystem.class);
+
+  /**
+   * This is utility class.
+   */
+  private HBackupFileSystem() {
+  }
+
+  /**
+   * Given the backup root dir, backup id and the table name, return the 
backup image location,
+   * which is also where the backup manifest file is. return value look like:
+   * 
"hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/",
 where
+   * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root 
directory
+   * @param backupRootDir backup root directory
+   * @param backupId backup id
+   * @param tableName table name
+   * @return backupPath String for the particular table
+   */
+  public static String
+  getTableBackupDir(String backupRootDir, String backupId, TableName 
tableName) {
+return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
++ tableName.getNamespaceAsString() + Path.SEPARATOR + 
tableName.getQualifierAsString()
++ Path.SEPARATOR;
+  }
+
+  public static String getTableBackupDataDir(String backupRootDir, String 
backupId,
+  TableName tableName) {
+return getTableBackupDir(backupRootDir, backupId, tableName) + 
Path.SEPARATOR + "data";
+  }
+
+  public static Path getBackupPath(String backupRootDir, String backupId) {
+return new Path(backupRootDir + Path.SEPARATOR + backupId);
+  }
+
+  /**
+   * Given the backup root dir, backup id and the table name, return the 
backup image location,
+   * which is also where the backup manifest file is. return value look like:
+   * 
"hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/",
 where
+   * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root 
directory
+   * @param backupRootPath backup root path
+   * @param tableName table name
+   * @param backupId backup Id
+   * @return backupPath for the particular table
+   */
+  public static Path getTableBackupPath(TableName tableName, Path 
backupRootPath, String backupId) {
+return new Path(getTableBackupDir(backupRootPath.toString(), backupId, 
tableName));
+  }
+
+  /**
+   * Given the backup root dir and the backup id, return the log file location 
for an incremental
+   * backup.
+   * @param backupRootDir backup root directory
+   * @param backupId backup id
+   * @return logBackupDir: ".../user/biadmin/backup/WALs/backup_1396650096738"
+   */
+  public static String getLogBackupDir(String backupRootDir, String backupId) {
+return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
++ HConstants.HREGION_LOGDIR_NAME;
+  }
+
+  public static Path getLogBackupPath(String backupRootDir, String backupId) {
+return new Path(getLogBackupDir(backupRootDir, backupId));
+  }
+
+  // TODO we do not keep WAL files anymore
+  // Move manifest file to other place
+  private static Path 

hbase git commit: HBASE-18614 Setting BUCKET_CACHE_COMBINED_KEY to false disables stats on RS UI

2017-08-23 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 4111f3981 -> 3071dc2fd


HBASE-18614 Setting BUCKET_CACHE_COMBINED_KEY to false disables stats on RS UI

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3071dc2f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3071dc2f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3071dc2f

Branch: refs/heads/branch-1.4
Commit: 3071dc2fd3486d8fd2ea132a65c340eaeebbf0e0
Parents: 4111f39
Author: Biju Nair 
Authored: Wed Aug 23 13:10:04 2017 -0400
Committer: tedyu 
Committed: Wed Aug 23 10:54:59 2017 -0700

--
 .../java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java | 4 
 .../apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java| 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3071dc2f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index f427e04..334e848 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.util.StringUtils;
+import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -1152,7 +1153,10 @@ public class LruBlockCache implements 
ResizableBlockCache, HeapSize {
   }
 
   @Override
+  @JsonIgnore
   public BlockCache[] getBlockCaches() {
+if (victimHandler != null)
+  return new BlockCache[] {this, this.victimHandler};
 return null;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3071dc2f/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
index 3b9161c..7ce9679 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
@@ -118,7 +118,7 @@ public class TestBlockCacheReporting {
 BlockCache [] bcs = bc.getBlockCaches();
 if (bcs != null) {
   for (BlockCache sbc: bc.getBlockCaches()) {
-bucketCacheReport(sbc);
+LOG.info(bc.getClass().getSimpleName() + ": " + sbc.getStats());
   }
 }
   }
@@ -156,4 +156,4 @@ public class TestBlockCacheReporting {
 }
 return cbsbf;
   }
-}
\ No newline at end of file
+}



hbase git commit: HBASE-17614 (addendum) Remove extraneous .DS_Store file.

2017-08-23 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master 6b21f8881 -> ae052e454


HBASE-17614 (addendum) Remove extraneous .DS_Store file.

Change-Id: I604832645f0d66fb972091c076c91558ab9627c5


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ae052e45
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ae052e45
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ae052e45

Branch: refs/heads/master
Commit: ae052e454029ed4963707112f3897b919d43596f
Parents: 6b21f88
Author: Apekshit Sharma 
Authored: Wed Aug 23 11:07:50 2017 -0700
Committer: Apekshit Sharma 
Committed: Wed Aug 23 11:07:50 2017 -0700

--
 hbase-backup/.DS_Store | Bin 6148 -> 0 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ae052e45/hbase-backup/.DS_Store
--
diff --git a/hbase-backup/.DS_Store b/hbase-backup/.DS_Store
deleted file mode 100644
index ab57a77..000
Binary files a/hbase-backup/.DS_Store and /dev/null differ



hbase git commit: HBASE-18575 [AMv2] Fixed and enabled TestRestartCluster#testRetainAssignmentOnRestart on master

2017-08-23 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 2dda37120 -> 6b21f8881


HBASE-18575 [AMv2] Fixed and enabled 
TestRestartCluster#testRetainAssignmentOnRestart on master

* Fixed ServerCrashProcedure to set forceNewPlan to false for instances 
AssignProcedure. This enables balancer to find most suitable target server
* Fixed and enabled TestRestartCluster#testRetainAssignmentOnRestart on master
* Renamed method ServerName@isSameHostnameAndPort() to isSameAddress()

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6b21f888
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6b21f888
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6b21f888

Branch: refs/heads/master
Commit: 6b21f8881be7649dadbdecd28dc2e2abe5c4ebe5
Parents: 2dda371
Author: Umesh Agashe 
Authored: Tue Aug 22 16:23:21 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 23 10:10:56 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/ServerName.java |  4 ++--
 .../apache/hadoop/hbase/DistributedHBaseCluster.java  |  4 ++--
 .../master/balancer/TestRSGroupBasedLoadBalancer.java |  2 +-
 .../hbase/favored/FavoredNodeAssignmentHelper.java|  3 +--
 .../hadoop/hbase/favored/FavoredNodeLoadBalancer.java |  2 +-
 .../apache/hadoop/hbase/favored/FavoredNodesPlan.java |  2 +-
 .../hadoop/hbase/master/ActiveMasterManager.java  |  2 +-
 .../org/apache/hadoop/hbase/master/DeadServer.java|  4 ++--
 .../org/apache/hadoop/hbase/master/ServerManager.java |  3 +--
 .../master/balancer/FavoredStochasticBalancer.java|  4 ++--
 .../hbase/master/procedure/ServerCrashProcedure.java  |  4 +++-
 .../hadoop/hbase/regionserver/HRegionServer.java  |  3 +--
 .../java/org/apache/hadoop/hbase/TestServerName.java  |  2 +-
 .../favored/TestStartcodeAgnosticServerName.java  |  4 ++--
 .../hadoop/hbase/master/TestRegionPlacement.java  |  8 
 .../hadoop/hbase/master/TestRegionPlacement2.java | 14 +++---
 .../hadoop/hbase/master/TestRestartCluster.java   |  7 ---
 .../TestFavoredStochasticBalancerPickers.java |  4 ++--
 .../balancer/TestFavoredStochasticLoadBalancer.java   |  7 +++
 19 files changed, 41 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6b21f888/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
index 88abc3f..5f83782 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
@@ -347,8 +347,8 @@ public class ServerName implements Comparable, 
Serializable {
* @param right
* @return True if other has same hostname and port.
*/
-  public static boolean isSameHostnameAndPort(final ServerName left,
-  final ServerName right) {
+  public static boolean isSameAddress(final ServerName left,
+  final ServerName right) {
 // TODO: Make this left.getAddress().equals(right.getAddress())
 if (left == null) return false;
 if (right == null) return false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6b21f888/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index 431ba42..55c6e28 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -320,7 +320,7 @@ public class DistributedHBaseCluster extends HBaseCluster {
 List deferred = new ArrayList<>();
 //check whether current master has changed
 final ServerName initMaster = initial.getMaster();
-if (!ServerName.isSameHostnameAndPort(initMaster, current.getMaster())) {
+if (!ServerName.isSameAddress(initMaster, current.getMaster())) {
   LOG.info("Restoring cluster - Initial active master : "
   + initMaster.getHostAndPort()
   + " has changed to : "
@@ -340,7 +340,7 @@ public class DistributedHBaseCluster extends HBaseCluster {
 // 2. Stop current master
 // 3. Start backup masters
 for (ServerName currentBackup : current.getBackupMasters()) {
-  if (!ServerName.isSameHostnameAndPort(currentBackup, initMaster)) {
+  if 

[3/3] hbase-thirdparty git commit: Move version to 1.0.1

2017-08-23 Thread stack
Move version to 1.0.1


Project: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/commit/0d136d07
Tree: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/tree/0d136d07
Diff: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/diff/0d136d07

Branch: refs/heads/master
Commit: 0d136d07548863c89650b8e742886d59808a8673
Parents: ae17e3f
Author: Michael Stack 
Authored: Wed Aug 23 11:19:24 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 23 11:19:24 2017 -0700

--
 hbase-shaded-miscellaneous/pom.xml | 5 -
 hbase-shaded-netty/pom.xml | 5 -
 hbase-shaded-protobuf/pom.xml  | 5 -
 pom.xml| 2 +-
 4 files changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/0d136d07/hbase-shaded-miscellaneous/pom.xml
--
diff --git a/hbase-shaded-miscellaneous/pom.xml 
b/hbase-shaded-miscellaneous/pom.xml
index 5650f51..b616593 100644
--- a/hbase-shaded-miscellaneous/pom.xml
+++ b/hbase-shaded-miscellaneous/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.0
+1.0.1
 ..
   
   hbase-shaded-miscellaneous
@@ -72,6 +72,9 @@
 
   com.google
   ${rename.offset}.com.google
+  
+
com.google.errorprone.annotations.CanIgnoreReturnValue
+  
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/0d136d07/hbase-shaded-netty/pom.xml
--
diff --git a/hbase-shaded-netty/pom.xml b/hbase-shaded-netty/pom.xml
index 7112952..a8b9c4a 100644
--- a/hbase-shaded-netty/pom.xml
+++ b/hbase-shaded-netty/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.0
+1.0.1
 ..
   
   hbase-shaded-netty
@@ -71,6 +71,9 @@
 
   io.netty
   ${rename.offset}.io.netty
+  
+
com.google.errorprone.annotations.CanIgnoreReturnValue
+  
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/0d136d07/hbase-shaded-protobuf/pom.xml
--
diff --git a/hbase-shaded-protobuf/pom.xml b/hbase-shaded-protobuf/pom.xml
index 38164e1..f9cf3fe 100644
--- a/hbase-shaded-protobuf/pom.xml
+++ b/hbase-shaded-protobuf/pom.xml
@@ -23,7 +23,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.0
+1.0.1
 ..
   
   hbase-shaded-protobuf
@@ -164,6 +164,9 @@
 
   com.google.protobuf
   
${rename.offset}.com.google.protobuf
+  
+
com.google.errorprone.annotations.CanIgnoreReturnValue
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/0d136d07/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 1f5f916..6818561 100644
--- a/pom.xml
+++ b/pom.xml
@@ -38,7 +38,7 @@
   
   org.apache.hbase.thirdparty
   hbase-thirdparty
-  1.0.0
+  1.0.1
   Apache HBase Third-Party Libs
   pom
   



[1/3] hbase-thirdparty git commit: Set version to 1.0.0 for first RC

2017-08-23 Thread stack
Repository: hbase-thirdparty
Updated Branches:
  refs/heads/master 199a65093 -> 0d136d075


Set version to 1.0.0 for first RC


Project: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/commit/8ffaf3dd
Tree: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/tree/8ffaf3dd
Diff: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/diff/8ffaf3dd

Branch: refs/heads/master
Commit: 8ffaf3dd561052bcc71772148ecd04cdf9e224f3
Parents: 199a650
Author: Michael Stack 
Authored: Fri Jun 30 13:37:39 2017 -0700
Committer: Michael Stack 
Committed: Fri Jun 30 13:37:39 2017 -0700

--
 hbase-shaded-miscellaneous/pom.xml | 2 +-
 hbase-shaded-netty/pom.xml | 2 +-
 hbase-shaded-protobuf/pom.xml  | 2 +-
 pom.xml| 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/8ffaf3dd/hbase-shaded-miscellaneous/pom.xml
--
diff --git a/hbase-shaded-miscellaneous/pom.xml 
b/hbase-shaded-miscellaneous/pom.xml
index c8adf84..f88ec9f 100644
--- a/hbase-shaded-miscellaneous/pom.xml
+++ b/hbase-shaded-miscellaneous/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.0-SNAPSHOT
+1.0.0
 ..
   
   hbase-shaded-miscellaneous

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/8ffaf3dd/hbase-shaded-netty/pom.xml
--
diff --git a/hbase-shaded-netty/pom.xml b/hbase-shaded-netty/pom.xml
index 4e5ac42..eda4dbb 100644
--- a/hbase-shaded-netty/pom.xml
+++ b/hbase-shaded-netty/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.0-SNAPSHOT
+1.0.0
 ..
   
   hbase-shaded-netty

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/8ffaf3dd/hbase-shaded-protobuf/pom.xml
--
diff --git a/hbase-shaded-protobuf/pom.xml b/hbase-shaded-protobuf/pom.xml
index dcb9fd5..1430254 100644
--- a/hbase-shaded-protobuf/pom.xml
+++ b/hbase-shaded-protobuf/pom.xml
@@ -23,7 +23,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.0-SNAPSHOT
+1.0.0
 ..
   
   hbase-shaded-protobuf

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/8ffaf3dd/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 7da877b..1f5f916 100644
--- a/pom.xml
+++ b/pom.xml
@@ -38,7 +38,7 @@
   
   org.apache.hbase.thirdparty
   hbase-thirdparty
-  1.0.0-SNAPSHOT
+  1.0.0
   Apache HBase Third-Party Libs
   pom
   



[2/3] hbase-thirdparty git commit: HBASE-18313 [hbase-thirdparty] Produce src jars/tgz

2017-08-23 Thread stack
HBASE-18313 [hbase-thirdparty] Produce src jars/tgz


Project: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/commit/ae17e3f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/tree/ae17e3f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/diff/ae17e3f1

Branch: refs/heads/master
Commit: ae17e3f1c93bbc4305108d61c09111004fe20461
Parents: 8ffaf3d
Author: zhangduo 
Authored: Tue Aug 22 14:02:02 2017 +0800
Committer: Michael Stack 
Committed: Mon Aug 21 23:12:47 2017 -0700

--
 .gitignore |  3 +++
 hbase-shaded-miscellaneous/pom.xml |  2 ++
 hbase-shaded-netty/pom.xml |  2 ++
 hbase-shaded-protobuf/pom.xml  | 13 +
 4 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/ae17e3f1/.gitignore
--
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000..afa17c0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+**/target
+**/dependency-reduced-pom.xml
+hbase-shaded-protobuf/src/main/java/

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/ae17e3f1/hbase-shaded-miscellaneous/pom.xml
--
diff --git a/hbase-shaded-miscellaneous/pom.xml 
b/hbase-shaded-miscellaneous/pom.xml
index f88ec9f..5650f51 100644
--- a/hbase-shaded-miscellaneous/pom.xml
+++ b/hbase-shaded-miscellaneous/pom.xml
@@ -66,6 +66,8 @@
   shade
 
 
+  true
+  true
   
 
   com.google

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/ae17e3f1/hbase-shaded-netty/pom.xml
--
diff --git a/hbase-shaded-netty/pom.xml b/hbase-shaded-netty/pom.xml
index eda4dbb..7112952 100644
--- a/hbase-shaded-netty/pom.xml
+++ b/hbase-shaded-netty/pom.xml
@@ -65,6 +65,8 @@
   shade
 
 
+  true
+  true
   
 
   io.netty

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/ae17e3f1/hbase-shaded-protobuf/pom.xml
--
diff --git a/hbase-shaded-protobuf/pom.xml b/hbase-shaded-protobuf/pom.xml
index 1430254..38164e1 100644
--- a/hbase-shaded-protobuf/pom.xml
+++ b/hbase-shaded-protobuf/pom.xml
@@ -134,6 +134,19 @@
   
 
   
+  
+org.apache.maven.plugins
+maven-source-plugin
+3.0.1
+
+  
+attach-sources
+
+  jar-no-fork
+
+  
+
+  
   
   
 org.apache.maven.plugins



hbase git commit: HBASE-18632 TestMultiParallel#testFlushCommitsWithAbort fails in master branch

2017-08-23 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 04f114b85 -> 6c0e219dd


HBASE-18632 TestMultiParallel#testFlushCommitsWithAbort fails in master branch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c0e219d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c0e219d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c0e219d

Branch: refs/heads/master
Commit: 6c0e219dd42f766de345f3bbc991ea8900f0eb2f
Parents: 04f114b
Author: tedyu 
Authored: Wed Aug 23 09:35:08 2017 -0700
Committer: tedyu 
Committed: Wed Aug 23 09:35:08 2017 -0700

--
 .../java/org/apache/hadoop/hbase/client/TestMultiParallel.java| 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c0e219d/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
index 62b6ae5..cfa7f37 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.codec.KeyValueCodec;
+import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -77,6 +78,8 @@ public class TestMultiParallel {
 //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
 UTIL.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
 KeyValueCodec.class.getCanonicalName());
+UTIL.getConfiguration().setBoolean(LoadBalancer.TABLES_ON_MASTER, true);
+UTIL.getConfiguration().setBoolean(LoadBalancer.SYSTEM_TABLES_ON_MASTER, 
true);
 UTIL.startMiniCluster(slaves);
 Table t = UTIL.createMultiRegionTable(TEST_TABLE, Bytes.toBytes(FAMILY));
 UTIL.waitTableEnabled(TEST_TABLE);



hbase git commit: HBASE-18632 TestMultiParallel#testFlushCommitsWithAbort fails in master branch

2017-08-23 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 1f2518779 -> af8e6aeab


HBASE-18632 TestMultiParallel#testFlushCommitsWithAbort fails in master branch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/af8e6aea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/af8e6aea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/af8e6aea

Branch: refs/heads/branch-2
Commit: af8e6aeab8a1b5c69374d1ad49405b284d7c4ec0
Parents: 1f25187
Author: tedyu 
Authored: Wed Aug 23 09:35:37 2017 -0700
Committer: tedyu 
Committed: Wed Aug 23 09:35:37 2017 -0700

--
 .../java/org/apache/hadoop/hbase/client/TestMultiParallel.java| 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/af8e6aea/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
index 62b6ae5..cfa7f37 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.codec.KeyValueCodec;
+import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -77,6 +78,8 @@ public class TestMultiParallel {
 //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
 UTIL.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
 KeyValueCodec.class.getCanonicalName());
+UTIL.getConfiguration().setBoolean(LoadBalancer.TABLES_ON_MASTER, true);
+UTIL.getConfiguration().setBoolean(LoadBalancer.SYSTEM_TABLES_ON_MASTER, 
true);
 UTIL.startMiniCluster(slaves);
 Table t = UTIL.createMultiRegionTable(TEST_TABLE, Bytes.toBytes(FAMILY));
 UTIL.waitTableEnabled(TEST_TABLE);



[06/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
deleted file mode 100644
index 4dab046..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ /dev/null
@@ -1,2051 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.SnapshotDescription;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Pair;
-
-/**
- * This class provides API to access backup system table
- *
- * Backup system table schema:
- * 
- * 1. Backup sessions rowkey= "session:"+backupId; value =serialized 
BackupInfo
- * 2. Backup start code rowkey = "startcode:"+backupRoot; value = 
startcode
- * 3. Incremental backup set rowkey="incrbackupset:"+backupRoot; 
value=[list of tables]
- * 4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name;
- * value = map[RS-> last WAL timestamp]
- * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last 
WAL timestamp
- * 6. WALs recorded rowkey="wals:"+WAL unique file name;
- * value = backupId and full WAL file name
- * 
- */
-
-@InterfaceAudience.Private
-public final class BackupSystemTable implements Closeable {
-  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
-
-  static class WALItem {
-String backupId;
-String walFile;
-String backupRoot;
-
-WALItem(String backupId, String walFile, String backupRoot) {
-  this.backupId = backupId;
-  this.walFile = walFile;
-  this.backupRoot = backupRoot;
-}
-
-public String getBackupId() {
-  return backupId;
-}
-
-public String getWalFile() {
-  return walFile;
-}
-
-public String getBackupRoot() {
-  return backupRoot;
-}
-
-@Override
-public String toString() {
-  return Path.SEPARATOR + backupRoot + Path.SEPARATOR + backupId + 

[22/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
deleted file mode 100644
index 49e8c75..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.mapreduce.HFileInputFormat;
-import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
-import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
-import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-/**
- * A tool to split HFiles into new region boundaries as a MapReduce job. The 
tool generates HFiles
- * for later bulk importing.
- */
-@InterfaceAudience.Private
-public class MapReduceHFileSplitterJob extends Configured implements Tool {
-  private static final Log LOG = 
LogFactory.getLog(MapReduceHFileSplitterJob.class);
-  final static String NAME = "HFileSplitterJob";
-  public final static String BULK_OUTPUT_CONF_KEY = "hfile.bulk.output";
-  public final static String TABLES_KEY = "hfile.input.tables";
-  public final static String TABLE_MAP_KEY = "hfile.input.tablesmap";
-  private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
-
-  public MapReduceHFileSplitterJob() {
-  }
-
-  protected MapReduceHFileSplitterJob(final Configuration c) {
-super(c);
-  }
-
-  /**
-   * A mapper that just writes out cells. This one can be used together with
-   * {@link KeyValueSortReducer}
-   */
-  static class HFileCellMapper extends
-  Mapper {
-
-@Override
-public void map(NullWritable key, KeyValue value, Context context) throws 
IOException,
-InterruptedException {
-  // Convert value to KeyValue if subclass
-  if (!value.getClass().equals(KeyValue.class)) {
-value =
-new KeyValue(value.getRowArray(), value.getRowOffset(), 
value.getRowLength(),
-value.getFamilyArray(), value.getFamilyOffset(), 
value.getFamilyLength(),
-value.getQualifierArray(), value.getQualifierOffset(), 
value.getQualifierLength(),
-value.getTimestamp(), Type.codeToType(value.getTypeByte()), 
value.getValueArray(),
-value.getValueOffset(), value.getValueLength());
-  }
-  context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), 
value);
-}
-
-@Override
-public void setup(Context context) throws IOException {
-  // do nothing
-}
-  }
-
-  /**
-   * Sets up the actual job.
-   * @param args The command line 

[07/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
deleted file mode 100644
index 7e3201e..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
+++ /dev/null
@@ -1,674 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-
-/**
- * Backup manifest contains all the meta data of a backup image. The manifest 
info will be bundled
- * as manifest file together with data. So that each backup image will contain 
all the info needed
- * for restore. BackupManifest is a storage container for BackupImage.
- * It is responsible for storing/reading backup image data and has some 
additional utility methods.
- *
- */
-@InterfaceAudience.Private
-public class BackupManifest {
-
-  private static final Log LOG = LogFactory.getLog(BackupManifest.class);
-
-  // manifest file name
-  public static final String MANIFEST_FILE_NAME = ".backup.manifest";
-
-  /**
-   * Backup image, the dependency graph is made up by series of backup images 
BackupImage contains
-   * all the relevant information to restore the backup and is used during 
restore operation
-   */
-
-  public static class BackupImage implements Comparable {
-
-static class Builder {
-  BackupImage image;
-
-  Builder() {
-image = new BackupImage();
-  }
-
-  Builder withBackupId(String backupId) {
-image.setBackupId(backupId);
-return this;
-  }
-
-  Builder withType(BackupType type) {
-image.setType(type);
-return this;
-  }
-
-  Builder withRootDir(String rootDir) {
-image.setRootDir(rootDir);
-return this;
-  }
-
-  Builder withTableList(List tableList) {
-image.setTableList(tableList);
-return this;
-  }
-
-  Builder withStartTime(long startTime) {
-image.setStartTs(startTime);
-return this;
-  }
-
-  Builder withCompleteTime(long completeTime) {
-image.setCompleteTs(completeTime);
-return this;
-  }
-
-  BackupImage build() {
-return image;
-  }
-
-}
-
-private String backupId;
-private BackupType type;
-private String rootDir;
-private List tableList;
-private long startTs;
-private long completeTs;
-private ArrayList ancestors;
-private HashMap> incrTimeRanges;
-
-static Builder newBuilder() {
-  return new Builder();
-}
-
-public BackupImage() {
-  super();
-}
-
-private BackupImage(String backupId, BackupType type, String rootDir,
-List tableList, long startTs, long completeTs) {
-  this.backupId = backupId;
-  this.type = type;
-  

[19/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
deleted file mode 100644
index 7011ed3..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ /dev/null
@@ -1,336 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob;
-import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
-  private static final Log LOG = 
LogFactory.getLog(TestIncrementalBackupMergeWithFailures.class);
-
-  static enum FailurePhase {
-PHASE1, PHASE2, PHASE3, PHASE4
-  }
-  public final static String FAILURE_PHASE_KEY = "failurePhase";
-
-  static class BackupMergeJobWithFailures extends MapReduceBackupMergeJob {
-
-FailurePhase failurePhase;
-
-@Override
-public void setConf(Configuration conf) {
-  super.setConf(conf);
-  String val = conf.get(FAILURE_PHASE_KEY);
-  if (val != null) {
-failurePhase = FailurePhase.valueOf(val);
-  } else {
-Assert.fail("Failure phase is not set");
-  }
-}
-
-
-/**
- * This is the exact copy of parent's run() with injections
- * of different types of failures
- */
-@Override
-public void run(String[] backupIds) throws IOException {
-  String bulkOutputConfKey;
-
-  // TODO : run player on remote cluster
-  player = new MapReduceHFileSplitterJob();
-  bulkOutputConfKey = MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY;
-  // Player reads all files in arbitrary directory structure and creates
-  // a Map task for each file
-  String bids = StringUtils.join(backupIds, ",");
-
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Merge backup images " + bids);
-  }
-
-  List> processedTableList = new 
ArrayList>();
-  boolean finishedTables = false;
-  Connection conn = ConnectionFactory.createConnection(getConf());
-  BackupSystemTable table = new BackupSystemTable(conn);
-  FileSystem fs = FileSystem.get(getConf());
-
-  try {
-
-// Start backup exclusive operation
-table.startBackupExclusiveOperation();
-// Start merge operation
-table.startMergeOperation(backupIds);
-
-// Select most recent backup id
-String mergedBackupId = findMostRecentBackupId(backupIds);
-
-TableName[] tableNames = 

[15/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
new file mode 100644
index 000..4dab046
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -0,0 +1,2051 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
+
+/**
+ * This class provides API to access backup system table
+ *
+ * Backup system table schema:
+ * 
+ * 1. Backup sessions rowkey= "session:"+backupId; value =serialized 
BackupInfo
+ * 2. Backup start code rowkey = "startcode:"+backupRoot; value = 
startcode
+ * 3. Incremental backup set rowkey="incrbackupset:"+backupRoot; 
value=[list of tables]
+ * 4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name;
+ * value = map[RS-> last WAL timestamp]
+ * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last 
WAL timestamp
+ * 6. WALs recorded rowkey="wals:"+WAL unique file name;
+ * value = backupId and full WAL file name
+ * 
+ */
+
+@InterfaceAudience.Private
+public final class BackupSystemTable implements Closeable {
+  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
+
+  static class WALItem {
+String backupId;
+String walFile;
+String backupRoot;
+
+WALItem(String backupId, String walFile, String backupRoot) {
+  this.backupId = backupId;
+  this.walFile = walFile;
+  this.backupRoot = backupRoot;
+}
+
+public String getBackupId() {
+  return backupId;
+}
+
+public String getWalFile() {
+  return walFile;
+}
+
+public String getBackupRoot() {
+  return backupRoot;
+}
+
+@Override
+public String toString() {
+  return Path.SEPARATOR + backupRoot + Path.SEPARATOR + backupId + 

[13/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
new file mode 100644
index 000..49e8c75
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.mapreduce;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.mapreduce.HFileInputFormat;
+import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
+import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
+import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * A tool to split HFiles into new region boundaries as a MapReduce job. The 
tool generates HFiles
+ * for later bulk importing.
+ */
+@InterfaceAudience.Private
+public class MapReduceHFileSplitterJob extends Configured implements Tool {
+  private static final Log LOG = 
LogFactory.getLog(MapReduceHFileSplitterJob.class);
+  final static String NAME = "HFileSplitterJob";
+  public final static String BULK_OUTPUT_CONF_KEY = "hfile.bulk.output";
+  public final static String TABLES_KEY = "hfile.input.tables";
+  public final static String TABLE_MAP_KEY = "hfile.input.tablesmap";
+  private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
+
+  public MapReduceHFileSplitterJob() {
+  }
+
+  protected MapReduceHFileSplitterJob(final Configuration c) {
+super(c);
+  }
+
+  /**
+   * A mapper that just writes out cells. This one can be used together with
+   * {@link KeyValueSortReducer}
+   */
+  static class HFileCellMapper extends
+  Mapper {
+
+@Override
+public void map(NullWritable key, KeyValue value, Context context) throws 
IOException,
+InterruptedException {
+  // Convert value to KeyValue if subclass
+  if (!value.getClass().equals(KeyValue.class)) {
+value =
+new KeyValue(value.getRowArray(), value.getRowOffset(), 
value.getRowLength(),
+value.getFamilyArray(), value.getFamilyOffset(), 
value.getFamilyLength(),
+value.getQualifierArray(), value.getQualifierOffset(), 
value.getQualifierLength(),
+value.getTimestamp(), Type.codeToType(value.getTypeByte()), 
value.getValueArray(),
+value.getValueOffset(), value.getValueLength());
+  }
+  context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), 
value);
+}
+
+@Override
+public void setup(Context context) throws IOException {
+  // do nothing
+}
+  }
+
+  /**
+   * Sets up the actual job.
+   * @param args The command line 

[03/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
deleted file mode 100644
index 0cfe099..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ /dev/null
@@ -1,516 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.util;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.RestoreJob;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-
-/**
- * A collection for methods used by multiple classes to restore HBase tables.
- */
-@InterfaceAudience.Private
-public class RestoreTool {
-
-  public static final Log LOG = LogFactory.getLog(BackupUtils.class);
-  private final static long TABLE_AVAILABILITY_WAIT_TIME = 18;
-
-  private final String[] ignoreDirs = { HConstants.RECOVERED_EDITS_DIR };
-  protected Configuration conf = null;
-  protected Path backupRootPath;
-  protected String backupId;
-  protected FileSystem fs;
-
-  // store table name and snapshot dir mapping
-  private final HashMap snapshotMap = new HashMap<>();
-
-  public RestoreTool(Configuration conf, final Path backupRootPath, final 
String backupId)
-  throws IOException {
-this.conf = conf;
-this.backupRootPath = backupRootPath;
-this.backupId = backupId;
-this.fs = backupRootPath.getFileSystem(conf);
-  }
-
-  /**
-   * return value represent path for:
-   * 
".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
-   * @param tableName table name
-   * @return path to table archive
-   * @throws IOException exception
-   */
-  Path getTableArchivePath(TableName tableName) throws IOException {
-
-Path baseDir =
-new Path(HBackupFileSystem.getTableBackupPath(tableName, 
backupRootPath, backupId),
-HConstants.HFILE_ARCHIVE_DIRECTORY);
-Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
-Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
-Path tableArchivePath = new Path(archivePath, 
tableName.getQualifierAsString());
-if (!fs.exists(tableArchivePath) || 
!fs.getFileStatus(tableArchivePath).isDirectory()) {
-  LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " 
does not exists");
-  tableArchivePath = null; // empty table has no archive
-}
-return tableArchivePath;
-  }
-
-  /**
-   * Gets region list
- 

[20/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
deleted file mode 100644
index 0944ea2..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-/**
- * Create multiple backups for two tables: table1, table2 then perform 1 delete
- */
-@Category(LargeTests.class)
-public class TestBackupMultipleDeletes extends TestBackupBase {
-  private static final Log LOG = 
LogFactory.getLog(TestBackupMultipleDeletes.class);
-
-  @Test
-  public void testBackupMultipleDeletes() throws Exception {
-// #1 - create full backup for all tables
-LOG.info("create full backup image for all tables");
-List tables = Lists.newArrayList(table1, table2);
-HBaseAdmin admin = null;
-Connection conn = ConnectionFactory.createConnection(conf1);
-admin = (HBaseAdmin) conn.getAdmin();
-BackupAdmin client = new BackupAdminImpl(conn);
-BackupRequest request = createBackupRequest(BackupType.FULL, tables, 
BACKUP_ROOT_DIR);
-String backupIdFull = client.backupTables(request);
-assertTrue(checkSucceeded(backupIdFull));
-// #2 - insert some data to table table1
-HTable t1 = (HTable) conn.getTable(table1);
-Put p1;
-for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-  p1 = new Put(Bytes.toBytes("row-t1" + i));
-  p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-  t1.put(p1);
-}
-Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
-t1.close();
-// #3 - incremental backup for table1
-tables = Lists.newArrayList(table1);
-request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
-String backupIdInc1 = client.backupTables(request);
-assertTrue(checkSucceeded(backupIdInc1));
-// #4 - insert some data to table table2
-HTable t2 = (HTable) conn.getTable(table2);
-Put p2 = null;
-for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-  p2 = new Put(Bytes.toBytes("row-t2" + i));
-  p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-  t2.put(p2);
-}
-// #5 - incremental backup for table1, table2
-tables = Lists.newArrayList(table1, table2);
-request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
-String backupIdInc2 = client.backupTables(request);
-assertTrue(checkSucceeded(backupIdInc2));
-// #6 - insert some data to table table1
-t1 = (HTable) conn.getTable(table1);
-for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
-  p1 = new Put(Bytes.toBytes("row-t1" + i));
-  p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-  t1.put(p1);
-}
-// #7 - incremental backup for table1
-tables = Lists.newArrayList(table1);
-request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
-String 

[24/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
deleted file mode 100644
index 4dab046..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ /dev/null
@@ -1,2051 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.SnapshotDescription;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Pair;
-
-/**
- * This class provides API to access backup system table
- *
- * Backup system table schema:
- * 
- * 1. Backup sessions rowkey= "session:"+backupId; value =serialized 
BackupInfo
- * 2. Backup start code rowkey = "startcode:"+backupRoot; value = 
startcode
- * 3. Incremental backup set rowkey="incrbackupset:"+backupRoot; 
value=[list of tables]
- * 4. Table-RS-timestamp map rowkey="trslm:"+backupRoot+table_name;
- * value = map[RS-> last WAL timestamp]
- * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last 
WAL timestamp
- * 6. WALs recorded rowkey="wals:"+WAL unique file name;
- * value = backupId and full WAL file name
- * 
- */
-
-@InterfaceAudience.Private
-public final class BackupSystemTable implements Closeable {
-  private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
-
-  static class WALItem {
-String backupId;
-String walFile;
-String backupRoot;
-
-WALItem(String backupId, String walFile, String backupRoot) {
-  this.backupId = backupId;
-  this.walFile = walFile;
-  this.backupRoot = backupRoot;
-}
-
-public String getBackupId() {
-  return backupId;
-}
-
-public String getWalFile() {
-  return walFile;
-}
-
-public String getBackupRoot() {
-  return backupRoot;
-}
-
-@Override
-public String toString() {
-  return Path.SEPARATOR + backupRoot + Path.SEPARATOR + backupId + 

[21/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
deleted file mode 100644
index 0cfe099..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ /dev/null
@@ -1,516 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.util;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.RestoreJob;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
-
-/**
- * A collection for methods used by multiple classes to restore HBase tables.
- */
-@InterfaceAudience.Private
-public class RestoreTool {
-
-  public static final Log LOG = LogFactory.getLog(BackupUtils.class);
-  private final static long TABLE_AVAILABILITY_WAIT_TIME = 18;
-
-  private final String[] ignoreDirs = { HConstants.RECOVERED_EDITS_DIR };
-  protected Configuration conf = null;
-  protected Path backupRootPath;
-  protected String backupId;
-  protected FileSystem fs;
-
-  // store table name and snapshot dir mapping
-  private final HashMap snapshotMap = new HashMap<>();
-
-  public RestoreTool(Configuration conf, final Path backupRootPath, final 
String backupId)
-  throws IOException {
-this.conf = conf;
-this.backupRootPath = backupRootPath;
-this.backupId = backupId;
-this.fs = backupRootPath.getFileSystem(conf);
-  }
-
-  /**
-   * return value represent path for:
-   * 
".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
-   * @param tableName table name
-   * @return path to table archive
-   * @throws IOException exception
-   */
-  Path getTableArchivePath(TableName tableName) throws IOException {
-
-Path baseDir =
-new Path(HBackupFileSystem.getTableBackupPath(tableName, 
backupRootPath, backupId),
-HConstants.HFILE_ARCHIVE_DIRECTORY);
-Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
-Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
-Path tableArchivePath = new Path(archivePath, 
tableName.getQualifierAsString());
-if (!fs.exists(tableArchivePath) || 
!fs.getFileStatus(tableArchivePath).isDirectory()) {
-  LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " 
does not exists");
-  tableArchivePath = null; // empty table has no archive
-}
-return tableArchivePath;
-  }
-
-  /**
-   * Gets region list
- 

[10/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
--
diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
new file mode 100644
index 000..7011ed3
--- /dev/null
+++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -0,0 +1,336 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
+  private static final Log LOG = 
LogFactory.getLog(TestIncrementalBackupMergeWithFailures.class);
+
+  static enum FailurePhase {
+PHASE1, PHASE2, PHASE3, PHASE4
+  }
+  public final static String FAILURE_PHASE_KEY = "failurePhase";
+
+  static class BackupMergeJobWithFailures extends MapReduceBackupMergeJob {
+
+FailurePhase failurePhase;
+
+@Override
+public void setConf(Configuration conf) {
+  super.setConf(conf);
+  String val = conf.get(FAILURE_PHASE_KEY);
+  if (val != null) {
+failurePhase = FailurePhase.valueOf(val);
+  } else {
+Assert.fail("Failure phase is not set");
+  }
+}
+
+
+/**
+ * This is the exact copy of parent's run() with injections
+ * of different types of failures
+ */
+@Override
+public void run(String[] backupIds) throws IOException {
+  String bulkOutputConfKey;
+
+  // TODO : run player on remote cluster
+  player = new MapReduceHFileSplitterJob();
+  bulkOutputConfKey = MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY;
+  // Player reads all files in arbitrary directory structure and creates
+  // a Map task for each file
+  String bids = StringUtils.join(backupIds, ",");
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Merge backup images " + bids);
+  }
+
+  List> processedTableList = new 
ArrayList>();
+  boolean finishedTables = false;
+  Connection conn = ConnectionFactory.createConnection(getConf());
+  BackupSystemTable table = new BackupSystemTable(conn);
+  FileSystem fs = FileSystem.get(getConf());
+
+  try {
+
+// Start backup exclusive operation
+table.startBackupExclusiveOperation();
+// Start merge operation
+table.startMergeOperation(backupIds);
+
+// Select most recent backup id
+String mergedBackupId = findMostRecentBackupId(backupIds);
+
+TableName[] tableNames = 

[17/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
new file mode 100644
index 000..1c43e88
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
@@ -0,0 +1,146 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.HashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * View to an on-disk Backup Image FileSytem Provides the set of methods 
necessary to interact with
+ * the on-disk Backup Image data.
+ */
+@InterfaceAudience.Private
+public class HBackupFileSystem {
+  public static final Log LOG = LogFactory.getLog(HBackupFileSystem.class);
+
+  /**
+   * This is utility class.
+   */
+  private HBackupFileSystem() {
+  }
+
+  /**
+   * Given the backup root dir, backup id and the table name, return the 
backup image location,
+   * which is also where the backup manifest file is. return value look like:
+   * 
"hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/",
 where
+   * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root 
directory
+   * @param backupRootDir backup root directory
+   * @param backupId backup id
+   * @param tableName table name
+   * @return backupPath String for the particular table
+   */
+  public static String
+  getTableBackupDir(String backupRootDir, String backupId, TableName 
tableName) {
+return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
++ tableName.getNamespaceAsString() + Path.SEPARATOR + 
tableName.getQualifierAsString()
++ Path.SEPARATOR;
+  }
+
+  public static String getTableBackupDataDir(String backupRootDir, String 
backupId,
+  TableName tableName) {
+return getTableBackupDir(backupRootDir, backupId, tableName) + 
Path.SEPARATOR + "data";
+  }
+
+  public static Path getBackupPath(String backupRootDir, String backupId) {
+return new Path(backupRootDir + Path.SEPARATOR + backupId);
+  }
+
+  /**
+   * Given the backup root dir, backup id and the table name, return the 
backup image location,
+   * which is also where the backup manifest file is. return value look like:
+   * 
"hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/",
 where
+   * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is a backup root 
directory
+   * @param backupRootPath backup root path
+   * @param tableName table name
+   * @param backupId backup Id
+   * @return backupPath for the particular table
+   */
+  public static Path getTableBackupPath(TableName tableName, Path 
backupRootPath, String backupId) {
+return new Path(getTableBackupDir(backupRootPath.toString(), backupId, 
tableName));
+  }
+
+  /**
+   * Given the backup root dir and the backup id, return the log file location 
for an incremental
+   * backup.
+   * @param backupRootDir backup root directory
+   * @param backupId backup id
+   * @return logBackupDir: ".../user/biadmin/backup/WALs/backup_1396650096738"
+   */
+  public static String getLogBackupDir(String backupRootDir, String backupId) {
+return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
++ HConstants.HREGION_LOGDIR_NAME;
+  }
+
+  public static Path getLogBackupPath(String backupRootDir, String backupId) {
+return new Path(getLogBackupDir(backupRootDir, backupId));
+  }
+
+  // TODO we do not keep WAL files anymore
+  // Move manifest file to other place
+  private static Path 

[25/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
deleted file mode 100644
index 7e3201e..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
+++ /dev/null
@@ -1,674 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-
-/**
- * Backup manifest contains all the meta data of a backup image. The manifest 
info will be bundled
- * as manifest file together with data. So that each backup image will contain 
all the info needed
- * for restore. BackupManifest is a storage container for BackupImage.
- * It is responsible for storing/reading backup image data and has some 
additional utility methods.
- *
- */
-@InterfaceAudience.Private
-public class BackupManifest {
-
-  private static final Log LOG = LogFactory.getLog(BackupManifest.class);
-
-  // manifest file name
-  public static final String MANIFEST_FILE_NAME = ".backup.manifest";
-
-  /**
-   * Backup image, the dependency graph is made up by series of backup images 
BackupImage contains
-   * all the relevant information to restore the backup and is used during 
restore operation
-   */
-
-  public static class BackupImage implements Comparable {
-
-static class Builder {
-  BackupImage image;
-
-  Builder() {
-image = new BackupImage();
-  }
-
-  Builder withBackupId(String backupId) {
-image.setBackupId(backupId);
-return this;
-  }
-
-  Builder withType(BackupType type) {
-image.setType(type);
-return this;
-  }
-
-  Builder withRootDir(String rootDir) {
-image.setRootDir(rootDir);
-return this;
-  }
-
-  Builder withTableList(List tableList) {
-image.setTableList(tableList);
-return this;
-  }
-
-  Builder withStartTime(long startTime) {
-image.setStartTs(startTime);
-return this;
-  }
-
-  Builder withCompleteTime(long completeTime) {
-image.setCompleteTs(completeTime);
-return this;
-  }
-
-  BackupImage build() {
-return image;
-  }
-
-}
-
-private String backupId;
-private BackupType type;
-private String rootDir;
-private List tableList;
-private long startTs;
-private long completeTs;
-private ArrayList ancestors;
-private HashMap> incrTimeRanges;
-
-static Builder newBuilder() {
-  return new Builder();
-}
-
-public BackupImage() {
-  super();
-}
-
-private BackupImage(String backupId, BackupType type, String rootDir,
-List tableList, long startTs, long completeTs) {
-  this.backupId = backupId;
-  this.type = type;
-  

[04/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
deleted file mode 100644
index 49e8c75..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.mapreduce;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RegionLocator;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.mapreduce.HFileInputFormat;
-import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
-import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
-import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-/**
- * A tool to split HFiles into new region boundaries as a MapReduce job. The 
tool generates HFiles
- * for later bulk importing.
- */
-@InterfaceAudience.Private
-public class MapReduceHFileSplitterJob extends Configured implements Tool {
-  private static final Log LOG = 
LogFactory.getLog(MapReduceHFileSplitterJob.class);
-  final static String NAME = "HFileSplitterJob";
-  public final static String BULK_OUTPUT_CONF_KEY = "hfile.bulk.output";
-  public final static String TABLES_KEY = "hfile.input.tables";
-  public final static String TABLE_MAP_KEY = "hfile.input.tablesmap";
-  private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
-
-  public MapReduceHFileSplitterJob() {
-  }
-
-  protected MapReduceHFileSplitterJob(final Configuration c) {
-super(c);
-  }
-
-  /**
-   * A mapper that just writes out cells. This one can be used together with
-   * {@link KeyValueSortReducer}
-   */
-  static class HFileCellMapper extends
-  Mapper {
-
-@Override
-public void map(NullWritable key, KeyValue value, Context context) throws 
IOException,
-InterruptedException {
-  // Convert value to KeyValue if subclass
-  if (!value.getClass().equals(KeyValue.class)) {
-value =
-new KeyValue(value.getRowArray(), value.getRowOffset(), 
value.getRowLength(),
-value.getFamilyArray(), value.getFamilyOffset(), 
value.getFamilyLength(),
-value.getQualifierArray(), value.getQualifierOffset(), 
value.getQualifierLength(),
-value.getTimestamp(), Type.codeToType(value.getTypeByte()), 
value.getValueArray(),
-value.getValueOffset(), value.getValueLength());
-  }
-  context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), 
value);
-}
-
-@Override
-public void setup(Context context) throws IOException {
-  // do nothing
-}
-  }
-
-  /**
-   * Sets up the actual job.
-   * @param args The command line 

[11/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
--
diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
new file mode 100644
index 000..0944ea2
--- /dev/null
+++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
+/**
+ * Create multiple backups for two tables: table1, table2 then perform 1 delete
+ */
+@Category(LargeTests.class)
+public class TestBackupMultipleDeletes extends TestBackupBase {
+  private static final Log LOG = 
LogFactory.getLog(TestBackupMultipleDeletes.class);
+
+  @Test
+  public void testBackupMultipleDeletes() throws Exception {
+// #1 - create full backup for all tables
+LOG.info("create full backup image for all tables");
+List tables = Lists.newArrayList(table1, table2);
+HBaseAdmin admin = null;
+Connection conn = ConnectionFactory.createConnection(conf1);
+admin = (HBaseAdmin) conn.getAdmin();
+BackupAdmin client = new BackupAdminImpl(conn);
+BackupRequest request = createBackupRequest(BackupType.FULL, tables, 
BACKUP_ROOT_DIR);
+String backupIdFull = client.backupTables(request);
+assertTrue(checkSucceeded(backupIdFull));
+// #2 - insert some data to table table1
+HTable t1 = (HTable) conn.getTable(table1);
+Put p1;
+for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+  p1 = new Put(Bytes.toBytes("row-t1" + i));
+  p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+  t1.put(p1);
+}
+Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
+t1.close();
+// #3 - incremental backup for table1
+tables = Lists.newArrayList(table1);
+request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
+String backupIdInc1 = client.backupTables(request);
+assertTrue(checkSucceeded(backupIdInc1));
+// #4 - insert some data to table table2
+HTable t2 = (HTable) conn.getTable(table2);
+Put p2 = null;
+for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+  p2 = new Put(Bytes.toBytes("row-t2" + i));
+  p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+  t2.put(p2);
+}
+// #5 - incremental backup for table1, table2
+tables = Lists.newArrayList(table1, table2);
+request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
+String backupIdInc2 = client.backupTables(request);
+assertTrue(checkSucceeded(backupIdInc2));
+// #6 - insert some data to table table1
+t1 = (HTable) conn.getTable(table1);
+for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
+  p1 = new Put(Bytes.toBytes("row-t1" + i));
+  p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+  t1.put(p1);
+}
+// #7 - incremental backup for table1
+tables = Lists.newArrayList(table1);
+request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
+String backupIdInc3 = 

[14/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
new file mode 100644
index 000..6330899
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -0,0 +1,387 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+
+/**
+ * After a full backup was created, the incremental backup will only store the 
changes made after
+ * the last full or incremental backup. Creating the backup copies the 
logfiles in .logs and
+ * .oldlogs since the last backup timestamp.
+ */
+@InterfaceAudience.Private
+public class IncrementalBackupManager extends BackupManager {
+  public static final Log LOG = 
LogFactory.getLog(IncrementalBackupManager.class);
+
+  public IncrementalBackupManager(Connection conn, Configuration conf) throws 
IOException {
+super(conn, conf);
+  }
+
+  /**
+   * Obtain the list of logs that need to be copied out for this incremental 
backup. The list is set
+   * in BackupInfo.
+   * @return The new HashMap of RS log time stamps after the log roll for this 
incremental backup.
+   * @throws IOException exception
+   */
+  public HashMap getIncrBackupLogFileMap()
+  throws IOException {
+List logList;
+HashMap newTimestamps;
+HashMap previousTimestampMins;
+
+String savedStartCode = readBackupStartCode();
+
+// key: tableName
+// value: 
+HashMap> previousTimestampMap = 
readLogTimestampMap();
+
+previousTimestampMins = 
BackupUtils.getRSLogTimestampMins(previousTimestampMap);
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("StartCode " + savedStartCode + "for backupID " + 
backupInfo.getBackupId());
+}
+// get all new log files from .logs and .oldlogs after last TS and before 
new timestamp
+if (savedStartCode == null || previousTimestampMins == null
+|| previousTimestampMins.isEmpty()) {
+  throw new IOException(
+  "Cannot read any previous back up timestamps from backup system 
table. "
+  + "In order to create an incremental backup, at least one full 
backup is needed.");
+}
+
+LOG.info("Execute roll log procedure for incremental backup ...");
+HashMap props = new HashMap();
+props.put("backupRoot", backupInfo.getBackupRootDir());
+
+try (Admin admin = conn.getAdmin();) {
+
+  
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
+LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
+
+}
+newTimestamps = readRegionServerLastLogRollResult();
+
+logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, 
conf, savedStartCode);
+

[36/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/37c65946
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/37c65946
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/37c65946

Branch: refs/heads/branch-2
Commit: 37c65946274c6acdb8650fc91f889b61f95b3a64
Parents: af8e6ae
Author: Vladimir Rodionov 
Authored: Tue Aug 22 17:14:48 2017 -0700
Committer: Josh Elser 
Committed: Wed Aug 23 12:46:04 2017 -0400

--
 hbase-assembly/pom.xml  |6 +-
 .../src/main/assembly/hadoop-two-compat.xml |1 +
 hbase-assembly/src/main/assembly/src.xml|1 +
 hbase-backup/.DS_Store  |  Bin 0 -> 6148 bytes
 hbase-backup/pom.xml|  265 +++
 .../apache/hadoop/hbase/backup/BackupAdmin.java |  136 ++
 .../hbase/backup/BackupClientFactory.java   |   53 +
 .../hadoop/hbase/backup/BackupCopyJob.java  |   55 +
 .../hadoop/hbase/backup/BackupDriver.java   |  210 ++
 .../hadoop/hbase/backup/BackupHFileCleaner.java |  180 ++
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  550 +
 .../hadoop/hbase/backup/BackupMergeJob.java |   40 +
 .../hadoop/hbase/backup/BackupObserver.java |  102 +
 .../hadoop/hbase/backup/BackupRequest.java  |  139 ++
 .../hbase/backup/BackupRestoreConstants.java|  123 ++
 .../hbase/backup/BackupRestoreFactory.java  |   82 +
 .../hadoop/hbase/backup/BackupTableInfo.java|   82 +
 .../hadoop/hbase/backup/HBackupFileSystem.java  |  146 ++
 .../apache/hadoop/hbase/backup/LogUtils.java|   50 +
 .../hadoop/hbase/backup/RestoreDriver.java  |  265 +++
 .../apache/hadoop/hbase/backup/RestoreJob.java  |   46 +
 .../hadoop/hbase/backup/RestoreRequest.java |  135 ++
 .../hbase/backup/impl/BackupAdminImpl.java  |  743 +++
 .../hbase/backup/impl/BackupCommands.java   | 1022 +
 .../hbase/backup/impl/BackupException.java  |   84 +
 .../hadoop/hbase/backup/impl/BackupManager.java |  502 +
 .../hbase/backup/impl/BackupManifest.java   |  674 ++
 .../hbase/backup/impl/BackupSystemTable.java| 2051 ++
 .../backup/impl/FullTableBackupClient.java  |  224 ++
 .../backup/impl/IncrementalBackupManager.java   |  387 
 .../impl/IncrementalTableBackupClient.java  |  377 
 .../hbase/backup/impl/RestoreTablesClient.java  |  278 +++
 .../hbase/backup/impl/TableBackupClient.java|  436 
 .../mapreduce/MapReduceBackupCopyJob.java   |  344 +++
 .../mapreduce/MapReduceBackupMergeJob.java  |  321 +++
 .../mapreduce/MapReduceHFileSplitterJob.java|  181 ++
 .../backup/mapreduce/MapReduceRestoreJob.java   |  136 ++
 .../hbase/backup/master/BackupLogCleaner.java   |  142 ++
 .../master/LogRollMasterProcedureManager.java   |  155 ++
 .../regionserver/LogRollBackupSubprocedure.java |  168 ++
 .../LogRollBackupSubprocedurePool.java  |  139 ++
 .../LogRollRegionServerProcedureManager.java|  185 ++
 .../hadoop/hbase/backup/util/BackupSet.java |   58 +
 .../hadoop/hbase/backup/util/BackupUtils.java   |  747 +++
 .../hadoop/hbase/backup/util/RestoreTool.java   |  516 +
 .../hadoop/hbase/backup/TestBackupBase.java |  503 +
 .../hbase/backup/TestBackupBoundaryTests.java   |   97 +
 .../hbase/backup/TestBackupCommandLineTool.java |  431 
 .../hadoop/hbase/backup/TestBackupDelete.java   |  102 +
 .../hbase/backup/TestBackupDeleteRestore.java   |   70 +
 .../backup/TestBackupDeleteWithFailures.java|  194 ++
 .../hadoop/hbase/backup/TestBackupDescribe.java |  110 +
 .../hbase/backup/TestBackupHFileCleaner.java|  141 ++
 .../hbase/backup/TestBackupMultipleDeletes.java |  158 ++
 .../hadoop/hbase/backup/TestBackupRepair.java   |   91 +
 .../hbase/backup/TestBackupShowHistory.java |  148 ++
 .../hbase/backup/TestBackupStatusProgress.java  |   96 +
 .../hbase/backup/TestBackupSystemTable.java |  511 +
 .../hadoop/hbase/backup/TestFullBackup.java |   59 +
 .../hadoop/hbase/backup/TestFullBackupSet.java  |  103 +
 .../backup/TestFullBackupSetRestoreSet.java |  128 ++
 .../backup/TestFullBackupWithFailures.java  |   79 +
 .../hadoop/hbase/backup/TestFullRestore.java|  345 +++
 .../hbase/backup/TestIncrementalBackup.java |  209 ++
 .../TestIncrementalBackupDeleteTable.java   |  129 ++
 .../TestIncrementalBackupMergeWithFailures.java |  336 +++
 .../TestIncrementalBackupWithBulkLoad.java  |  145 ++
 .../TestIncrementalBackupWithFailures.java  |  161 ++
 .../hadoop/hbase/backup/TestRemoteBackup.java   |  135 ++
 .../hadoop/hbase/backup/TestRemoteRestore.java  |   59 +
 .../backup/TestRepairAfterFailedDelete.java |   93 +
 

[31/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
new file mode 100644
index 000..49e8c75
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceHFileSplitterJob.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.mapreduce;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.mapreduce.HFileInputFormat;
+import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
+import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
+import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * A tool to split HFiles into new region boundaries as a MapReduce job. The 
tool generates HFiles
+ * for later bulk importing.
+ */
+@InterfaceAudience.Private
+public class MapReduceHFileSplitterJob extends Configured implements Tool {
+  private static final Log LOG = 
LogFactory.getLog(MapReduceHFileSplitterJob.class);
+  final static String NAME = "HFileSplitterJob";
+  public final static String BULK_OUTPUT_CONF_KEY = "hfile.bulk.output";
+  public final static String TABLES_KEY = "hfile.input.tables";
+  public final static String TABLE_MAP_KEY = "hfile.input.tablesmap";
+  private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
+
+  public MapReduceHFileSplitterJob() {
+  }
+
+  protected MapReduceHFileSplitterJob(final Configuration c) {
+super(c);
+  }
+
+  /**
+   * A mapper that just writes out cells. This one can be used together with
+   * {@link KeyValueSortReducer}
+   */
+  static class HFileCellMapper extends
+  Mapper {
+
+@Override
+public void map(NullWritable key, KeyValue value, Context context) throws 
IOException,
+InterruptedException {
+  // Convert value to KeyValue if subclass
+  if (!value.getClass().equals(KeyValue.class)) {
+value =
+new KeyValue(value.getRowArray(), value.getRowOffset(), 
value.getRowLength(),
+value.getFamilyArray(), value.getFamilyOffset(), 
value.getFamilyLength(),
+value.getQualifierArray(), value.getQualifierOffset(), 
value.getQualifierLength(),
+value.getTimestamp(), Type.codeToType(value.getTypeByte()), 
value.getValueArray(),
+value.getValueOffset(), value.getValueLength());
+  }
+  context.write(new ImmutableBytesWritable(CellUtil.cloneRow(value)), 
value);
+}
+
+@Override
+public void setup(Context context) throws IOException {
+  // do nothing
+}
+  }
+
+  /**
+   * Sets up the actual job.
+   * @param args The command line 

[32/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
new file mode 100644
index 000..6330899
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -0,0 +1,387 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+
+/**
+ * After a full backup was created, the incremental backup will only store the 
changes made after
+ * the last full or incremental backup. Creating the backup copies the 
logfiles in .logs and
+ * .oldlogs since the last backup timestamp.
+ */
+@InterfaceAudience.Private
+public class IncrementalBackupManager extends BackupManager {
+  public static final Log LOG = 
LogFactory.getLog(IncrementalBackupManager.class);
+
+  public IncrementalBackupManager(Connection conn, Configuration conf) throws 
IOException {
+super(conn, conf);
+  }
+
+  /**
+   * Obtain the list of logs that need to be copied out for this incremental 
backup. The list is set
+   * in BackupInfo.
+   * @return The new HashMap of RS log time stamps after the log roll for this 
incremental backup.
+   * @throws IOException exception
+   */
+  public HashMap getIncrBackupLogFileMap()
+  throws IOException {
+List logList;
+HashMap newTimestamps;
+HashMap previousTimestampMins;
+
+String savedStartCode = readBackupStartCode();
+
+// key: tableName
+// value: 
+HashMap> previousTimestampMap = 
readLogTimestampMap();
+
+previousTimestampMins = 
BackupUtils.getRSLogTimestampMins(previousTimestampMap);
+
+if (LOG.isDebugEnabled()) {
+  LOG.debug("StartCode " + savedStartCode + "for backupID " + 
backupInfo.getBackupId());
+}
+// get all new log files from .logs and .oldlogs after last TS and before 
new timestamp
+if (savedStartCode == null || previousTimestampMins == null
+|| previousTimestampMins.isEmpty()) {
+  throw new IOException(
+  "Cannot read any previous back up timestamps from backup system 
table. "
+  + "In order to create an incremental backup, at least one full 
backup is needed.");
+}
+
+LOG.info("Execute roll log procedure for incremental backup ...");
+HashMap props = new HashMap();
+props.put("backupRoot", backupInfo.getBackupRootDir());
+
+try (Admin admin = conn.getAdmin();) {
+
+  
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
+LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
+
+}
+newTimestamps = readRegionServerLastLogRollResult();
+
+logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, 
conf, savedStartCode);
+

[12/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
new file mode 100644
index 000..0cfe099
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -0,0 +1,516 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.util;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.RestoreJob;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+
+/**
+ * A collection for methods used by multiple classes to restore HBase tables.
+ */
+@InterfaceAudience.Private
+public class RestoreTool {
+
+  public static final Log LOG = LogFactory.getLog(BackupUtils.class);
+  private final static long TABLE_AVAILABILITY_WAIT_TIME = 18;
+
+  private final String[] ignoreDirs = { HConstants.RECOVERED_EDITS_DIR };
+  protected Configuration conf = null;
+  protected Path backupRootPath;
+  protected String backupId;
+  protected FileSystem fs;
+
+  // store table name and snapshot dir mapping
+  private final HashMap snapshotMap = new HashMap<>();
+
+  public RestoreTool(Configuration conf, final Path backupRootPath, final 
String backupId)
+  throws IOException {
+this.conf = conf;
+this.backupRootPath = backupRootPath;
+this.backupId = backupId;
+this.fs = backupRootPath.getFileSystem(conf);
+  }
+
+  /**
+   * return value represent path for:
+   * 
".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
+   * @param tableName table name
+   * @return path to table archive
+   * @throws IOException exception
+   */
+  Path getTableArchivePath(TableName tableName) throws IOException {
+
+Path baseDir =
+new Path(HBackupFileSystem.getTableBackupPath(tableName, 
backupRootPath, backupId),
+HConstants.HFILE_ARCHIVE_DIRECTORY);
+Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
+Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
+Path tableArchivePath = new Path(archivePath, 
tableName.getQualifierAsString());
+if (!fs.exists(tableArchivePath) || 
!fs.getFileStatus(tableArchivePath).isDirectory()) {
+  LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " 
does not exists");
+  tableArchivePath = null; // empty table has no archive
+}
+return tableArchivePath;
+  }
+
+  /**
+   * Gets region list
+   * 

[29/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
--
diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
new file mode 100644
index 000..0944ea2
--- /dev/null
+++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
+/**
+ * Create multiple backups for two tables: table1, table2 then perform 1 delete
+ */
+@Category(LargeTests.class)
+public class TestBackupMultipleDeletes extends TestBackupBase {
+  private static final Log LOG = 
LogFactory.getLog(TestBackupMultipleDeletes.class);
+
+  @Test
+  public void testBackupMultipleDeletes() throws Exception {
+// #1 - create full backup for all tables
+LOG.info("create full backup image for all tables");
+List tables = Lists.newArrayList(table1, table2);
+HBaseAdmin admin = null;
+Connection conn = ConnectionFactory.createConnection(conf1);
+admin = (HBaseAdmin) conn.getAdmin();
+BackupAdmin client = new BackupAdminImpl(conn);
+BackupRequest request = createBackupRequest(BackupType.FULL, tables, 
BACKUP_ROOT_DIR);
+String backupIdFull = client.backupTables(request);
+assertTrue(checkSucceeded(backupIdFull));
+// #2 - insert some data to table table1
+HTable t1 = (HTable) conn.getTable(table1);
+Put p1;
+for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+  p1 = new Put(Bytes.toBytes("row-t1" + i));
+  p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+  t1.put(p1);
+}
+Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
+t1.close();
+// #3 - incremental backup for table1
+tables = Lists.newArrayList(table1);
+request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
+String backupIdInc1 = client.backupTables(request);
+assertTrue(checkSucceeded(backupIdInc1));
+// #4 - insert some data to table table2
+HTable t2 = (HTable) conn.getTable(table2);
+Put p2 = null;
+for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+  p2 = new Put(Bytes.toBytes("row-t2" + i));
+  p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+  t2.put(p2);
+}
+// #5 - incremental backup for table1, table2
+tables = Lists.newArrayList(table1, table2);
+request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
+String backupIdInc2 = client.backupTables(request);
+assertTrue(checkSucceeded(backupIdInc2));
+// #6 - insert some data to table table1
+t1 = (HTable) conn.getTable(table1);
+for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
+  p1 = new Put(Bytes.toBytes("row-t1" + i));
+  p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+  t1.put(p1);
+}
+// #7 - incremental backup for table1
+tables = Lists.newArrayList(table1);
+request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
+String backupIdInc3 = 

[05/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
deleted file mode 100644
index 6330899..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ /dev/null
@@ -1,387 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem;
-import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-
-/**
- * After a full backup was created, the incremental backup will only store the 
changes made after
- * the last full or incremental backup. Creating the backup copies the 
logfiles in .logs and
- * .oldlogs since the last backup timestamp.
- */
-@InterfaceAudience.Private
-public class IncrementalBackupManager extends BackupManager {
-  public static final Log LOG = 
LogFactory.getLog(IncrementalBackupManager.class);
-
-  public IncrementalBackupManager(Connection conn, Configuration conf) throws 
IOException {
-super(conn, conf);
-  }
-
-  /**
-   * Obtain the list of logs that need to be copied out for this incremental 
backup. The list is set
-   * in BackupInfo.
-   * @return The new HashMap of RS log time stamps after the log roll for this 
incremental backup.
-   * @throws IOException exception
-   */
-  public HashMap getIncrBackupLogFileMap()
-  throws IOException {
-List logList;
-HashMap newTimestamps;
-HashMap previousTimestampMins;
-
-String savedStartCode = readBackupStartCode();
-
-// key: tableName
-// value: 
-HashMap> previousTimestampMap = 
readLogTimestampMap();
-
-previousTimestampMins = 
BackupUtils.getRSLogTimestampMins(previousTimestampMap);
-
-if (LOG.isDebugEnabled()) {
-  LOG.debug("StartCode " + savedStartCode + "for backupID " + 
backupInfo.getBackupId());
-}
-// get all new log files from .logs and .oldlogs after last TS and before 
new timestamp
-if (savedStartCode == null || previousTimestampMins == null
-|| previousTimestampMins.isEmpty()) {
-  throw new IOException(
-  "Cannot read any previous back up timestamps from backup system 
table. "
-  + "In order to create an incremental backup, at least one full 
backup is needed.");
-}
-
-LOG.info("Execute roll log procedure for incremental backup ...");
-HashMap props = new HashMap();
-props.put("backupRoot", backupInfo.getBackupRootDir());
-
-try (Admin admin = conn.getAdmin();) {
-
-  
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
-LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
-
-}
-newTimestamps = readRegionServerLastLogRollResult();
-
-logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, 
conf, savedStartCode);
-  

[02/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
deleted file mode 100644
index 0944ea2..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
-/**
- * Create multiple backups for two tables: table1, table2 then perform 1 delete
- */
-@Category(LargeTests.class)
-public class TestBackupMultipleDeletes extends TestBackupBase {
-  private static final Log LOG = 
LogFactory.getLog(TestBackupMultipleDeletes.class);
-
-  @Test
-  public void testBackupMultipleDeletes() throws Exception {
-// #1 - create full backup for all tables
-LOG.info("create full backup image for all tables");
-List tables = Lists.newArrayList(table1, table2);
-HBaseAdmin admin = null;
-Connection conn = ConnectionFactory.createConnection(conf1);
-admin = (HBaseAdmin) conn.getAdmin();
-BackupAdmin client = new BackupAdminImpl(conn);
-BackupRequest request = createBackupRequest(BackupType.FULL, tables, 
BACKUP_ROOT_DIR);
-String backupIdFull = client.backupTables(request);
-assertTrue(checkSucceeded(backupIdFull));
-// #2 - insert some data to table table1
-HTable t1 = (HTable) conn.getTable(table1);
-Put p1;
-for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-  p1 = new Put(Bytes.toBytes("row-t1" + i));
-  p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-  t1.put(p1);
-}
-Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
-t1.close();
-// #3 - incremental backup for table1
-tables = Lists.newArrayList(table1);
-request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
-String backupIdInc1 = client.backupTables(request);
-assertTrue(checkSucceeded(backupIdInc1));
-// #4 - insert some data to table table2
-HTable t2 = (HTable) conn.getTable(table2);
-Put p2 = null;
-for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
-  p2 = new Put(Bytes.toBytes("row-t2" + i));
-  p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-  t2.put(p2);
-}
-// #5 - incremental backup for table1, table2
-tables = Lists.newArrayList(table1, table2);
-request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
-String backupIdInc2 = client.backupTables(request);
-assertTrue(checkSucceeded(backupIdInc2));
-// #6 - insert some data to table table1
-t1 = (HTable) conn.getTable(table1);
-for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
-  p1 = new Put(Bytes.toBytes("row-t1" + i));
-  p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-  t1.put(p1);
-}
-// #7 - incremental backup for table1
-tables = Lists.newArrayList(table1);
-request = createBackupRequest(BackupType.INCREMENTAL, tables, 
BACKUP_ROOT_DIR);
-String 

[08/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
deleted file mode 100644
index 99fb06c..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
+++ /dev/null
@@ -1,743 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupAdmin;
-import org.apache.hadoop.hbase.backup.BackupClientFactory;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.backup.BackupMergeJob;
-import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
-import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.RestoreRequest;
-import org.apache.hadoop.hbase.backup.util.BackupSet;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-@InterfaceAudience.Private
-public class BackupAdminImpl implements BackupAdmin {
-  public final static String CHECK_OK = "Checking backup images: OK";
-  public final static String CHECK_FAILED =
-  "Checking backup images: Failed. Some dependencies are missing for 
restore";
-  private static final Log LOG = LogFactory.getLog(BackupAdminImpl.class);
-
-  private final Connection conn;
-
-  public BackupAdminImpl(Connection conn) {
-this.conn = conn;
-  }
-
-  @Override
-  public void close() throws IOException {
-  }
-
-  @Override
-  public BackupInfo getBackupInfo(String backupId) throws IOException {
-BackupInfo backupInfo = null;
-try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-  if (backupId == null) {
-ArrayList recentSessions = 
table.getBackupInfos(BackupState.RUNNING);
-if (recentSessions.isEmpty()) {
-  LOG.warn("No ongoing sessions found.");
-  return null;
-}
-// else show status for ongoing session
-// must be one maximum
-return recentSessions.get(0);
-  } else {
-backupInfo = table.readBackupInfo(backupId);
-return backupInfo;
-  }
-}
-  }
-
-  @Override
-  public int deleteBackups(String[] backupIds) throws IOException {
-
-int totalDeleted = 0;
-Map allTablesMap = new HashMap();
-
-boolean deleteSessionStarted = false;
-boolean snapshotDone = false;
-try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
-
-  // Step 1: Make sure there is no active session
-  // is running by using startBackupSession API
-  // If there is an active session in progress, exception will be thrown
-  try {
-sysTable.startBackupExclusiveOperation();
-deleteSessionStarted = true;
-  } catch (IOException e) {
-LOG.warn("You can not run delete command while active backup session 
is in progress. \n"
-+ "If there is no active 

[30/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
new file mode 100644
index 000..0cfe099
--- /dev/null
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -0,0 +1,516 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.util;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.RestoreJob;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+
+/**
+ * A collection for methods used by multiple classes to restore HBase tables.
+ */
+@InterfaceAudience.Private
+public class RestoreTool {
+
+  public static final Log LOG = LogFactory.getLog(BackupUtils.class);
+  private final static long TABLE_AVAILABILITY_WAIT_TIME = 18;
+
+  private final String[] ignoreDirs = { HConstants.RECOVERED_EDITS_DIR };
+  protected Configuration conf = null;
+  protected Path backupRootPath;
+  protected String backupId;
+  protected FileSystem fs;
+
+  // store table name and snapshot dir mapping
+  private final HashMap snapshotMap = new HashMap<>();
+
+  public RestoreTool(Configuration conf, final Path backupRootPath, final 
String backupId)
+  throws IOException {
+this.conf = conf;
+this.backupRootPath = backupRootPath;
+this.backupId = backupId;
+this.fs = backupRootPath.getFileSystem(conf);
+  }
+
+  /**
+   * return value represent path for:
+   * 
".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
+   * @param tableName table name
+   * @return path to table archive
+   * @throws IOException exception
+   */
+  Path getTableArchivePath(TableName tableName) throws IOException {
+
+Path baseDir =
+new Path(HBackupFileSystem.getTableBackupPath(tableName, 
backupRootPath, backupId),
+HConstants.HFILE_ARCHIVE_DIRECTORY);
+Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
+Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
+Path tableArchivePath = new Path(archivePath, 
tableName.getQualifierAsString());
+if (!fs.exists(tableArchivePath) || 
!fs.getFileStatus(tableArchivePath).isDirectory()) {
+  LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " 
does not exists");
+  tableArchivePath = null; // empty table has no archive
+}
+return tableArchivePath;
+  }
+
+  /**
+   * Gets region list
+   * 

[26/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
http://git-wip-us.apache.org/repos/asf/hbase/blob/37c65946/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
deleted file mode 100644
index 99fb06c..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
+++ /dev/null
@@ -1,743 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupAdmin;
-import org.apache.hadoop.hbase.backup.BackupClientFactory;
-import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-import org.apache.hadoop.hbase.backup.BackupMergeJob;
-import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
-import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
-import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-import org.apache.hadoop.hbase.backup.RestoreRequest;
-import org.apache.hadoop.hbase.backup.util.BackupSet;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-@InterfaceAudience.Private
-public class BackupAdminImpl implements BackupAdmin {
-  public final static String CHECK_OK = "Checking backup images: OK";
-  public final static String CHECK_FAILED =
-  "Checking backup images: Failed. Some dependencies are missing for 
restore";
-  private static final Log LOG = LogFactory.getLog(BackupAdminImpl.class);
-
-  private final Connection conn;
-
-  public BackupAdminImpl(Connection conn) {
-this.conn = conn;
-  }
-
-  @Override
-  public void close() throws IOException {
-  }
-
-  @Override
-  public BackupInfo getBackupInfo(String backupId) throws IOException {
-BackupInfo backupInfo = null;
-try (final BackupSystemTable table = new BackupSystemTable(conn)) {
-  if (backupId == null) {
-ArrayList recentSessions = 
table.getBackupInfos(BackupState.RUNNING);
-if (recentSessions.isEmpty()) {
-  LOG.warn("No ongoing sessions found.");
-  return null;
-}
-// else show status for ongoing session
-// must be one maximum
-return recentSessions.get(0);
-  } else {
-backupInfo = table.readBackupInfo(backupId);
-return backupInfo;
-  }
-}
-  }
-
-  @Override
-  public int deleteBackups(String[] backupIds) throws IOException {
-
-int totalDeleted = 0;
-Map allTablesMap = new HashMap();
-
-boolean deleteSessionStarted = false;
-boolean snapshotDone = false;
-try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
-
-  // Step 1: Make sure there is no active session
-  // is running by using startBackupSession API
-  // If there is an active session in progress, exception will be thrown
-  try {
-sysTable.startBackupExclusiveOperation();
-deleteSessionStarted = true;
-  } catch (IOException e) {
-LOG.warn("You can not run delete command while active backup session 
is in progress. \n"
-+ "If there is no active 

[01/36] hbase git commit: HBASE-17614: Move Backup/Restore into separate module (Vladimir Rodionov)

2017-08-23 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-2 af8e6aeab -> 37c659462
  refs/heads/master 6c0e219dd -> 2dda37120


http://git-wip-us.apache.org/repos/asf/hbase/blob/2dda3712/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
deleted file mode 100644
index 7011ed3..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ /dev/null
@@ -1,336 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.backup;
-
-import static org.apache.hadoop.hbase.backup.util.BackupUtils.succeeded;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
-import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
-import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob;
-import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
-import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Pair;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.collect.Lists;
-
-@Category(LargeTests.class)
-public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
-  private static final Log LOG = 
LogFactory.getLog(TestIncrementalBackupMergeWithFailures.class);
-
-  static enum FailurePhase {
-PHASE1, PHASE2, PHASE3, PHASE4
-  }
-  public final static String FAILURE_PHASE_KEY = "failurePhase";
-
-  static class BackupMergeJobWithFailures extends MapReduceBackupMergeJob {
-
-FailurePhase failurePhase;
-
-@Override
-public void setConf(Configuration conf) {
-  super.setConf(conf);
-  String val = conf.get(FAILURE_PHASE_KEY);
-  if (val != null) {
-failurePhase = FailurePhase.valueOf(val);
-  } else {
-Assert.fail("Failure phase is not set");
-  }
-}
-
-
-/**
- * This is the exact copy of parent's run() with injections
- * of different types of failures
- */
-@Override
-public void run(String[] backupIds) throws IOException {
-  String bulkOutputConfKey;
-
-  // TODO : run player on remote cluster
-  player = new MapReduceHFileSplitterJob();
-  bulkOutputConfKey = MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY;
-  // Player reads all files in arbitrary directory structure and creates
-  // a Map task for each file
-  String bids = StringUtils.join(backupIds, ",");
-
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Merge backup images " + bids);
-  }
-
-  List> processedTableList = new 
ArrayList>();
-  boolean finishedTables = false;
-  Connection conn = ConnectionFactory.createConnection(getConf());
-  BackupSystemTable table = new BackupSystemTable(conn);
-  FileSystem fs = FileSystem.get(getConf());
-
-  try {
-
-// Start backup exclusive operation
-table.startBackupExclusiveOperation();
-// Start merge operation
-table.startMergeOperation(backupIds);
-
-// Select most recent backup 

[2/2] hbase-thirdparty git commit: Up version to 1.0.1.

2017-08-23 Thread stack
Up version to 1.0.1.


Project: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/commit/68f0e0ee
Tree: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/tree/68f0e0ee
Diff: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/diff/68f0e0ee

Branch: refs/heads/master
Commit: 68f0e0ee1a300c0534dbc31520f21751388389fb
Parents: c095205
Author: Michael Stack 
Authored: Wed Aug 23 12:52:08 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 23 12:52:08 2017 -0700

--
 hbase-shaded-miscellaneous/pom.xml | 2 +-
 hbase-shaded-netty/pom.xml | 2 +-
 hbase-shaded-protobuf/pom.xml  | 2 +-
 pom.xml| 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/68f0e0ee/hbase-shaded-miscellaneous/pom.xml
--
diff --git a/hbase-shaded-miscellaneous/pom.xml 
b/hbase-shaded-miscellaneous/pom.xml
index 5650f51..564a791 100644
--- a/hbase-shaded-miscellaneous/pom.xml
+++ b/hbase-shaded-miscellaneous/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.0
+1.0.1
 ..
   
   hbase-shaded-miscellaneous

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/68f0e0ee/hbase-shaded-netty/pom.xml
--
diff --git a/hbase-shaded-netty/pom.xml b/hbase-shaded-netty/pom.xml
index 7112952..b06d7dd 100644
--- a/hbase-shaded-netty/pom.xml
+++ b/hbase-shaded-netty/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.0
+1.0.1
 ..
   
   hbase-shaded-netty

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/68f0e0ee/hbase-shaded-protobuf/pom.xml
--
diff --git a/hbase-shaded-protobuf/pom.xml b/hbase-shaded-protobuf/pom.xml
index 38164e1..28a7514 100644
--- a/hbase-shaded-protobuf/pom.xml
+++ b/hbase-shaded-protobuf/pom.xml
@@ -23,7 +23,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.0
+1.0.1
 ..
   
   hbase-shaded-protobuf

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/68f0e0ee/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 1f5f916..6818561 100644
--- a/pom.xml
+++ b/pom.xml
@@ -38,7 +38,7 @@
   
   org.apache.hbase.thirdparty
   hbase-thirdparty
-  1.0.0
+  1.0.1
   Apache HBase Third-Party Libs
   pom
   



[1/2] hbase-thirdparty git commit: Revert "Move version to 1.0.1"

2017-08-23 Thread stack
Repository: hbase-thirdparty
Updated Branches:
  refs/heads/master 0d136d075 -> 68f0e0ee1


Revert "Move version to 1.0.1"

This reverts commit 0d136d07548863c89650b8e742886d59808a8673.


Project: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/commit/c0952058
Tree: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/tree/c0952058
Diff: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/diff/c0952058

Branch: refs/heads/master
Commit: c095205830ddbe3c27537d578b4fd306105a3c30
Parents: 0d136d0
Author: Michael Stack 
Authored: Wed Aug 23 12:50:34 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 23 12:50:34 2017 -0700

--
 hbase-shaded-miscellaneous/pom.xml | 5 +
 hbase-shaded-netty/pom.xml | 5 +
 hbase-shaded-protobuf/pom.xml  | 5 +
 pom.xml| 2 +-
 4 files changed, 4 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/c0952058/hbase-shaded-miscellaneous/pom.xml
--
diff --git a/hbase-shaded-miscellaneous/pom.xml 
b/hbase-shaded-miscellaneous/pom.xml
index b616593..5650f51 100644
--- a/hbase-shaded-miscellaneous/pom.xml
+++ b/hbase-shaded-miscellaneous/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.1
+1.0.0
 ..
   
   hbase-shaded-miscellaneous
@@ -72,9 +72,6 @@
 
   com.google
   ${rename.offset}.com.google
-  
-
com.google.errorprone.annotations.CanIgnoreReturnValue
-  
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/c0952058/hbase-shaded-netty/pom.xml
--
diff --git a/hbase-shaded-netty/pom.xml b/hbase-shaded-netty/pom.xml
index a8b9c4a..7112952 100644
--- a/hbase-shaded-netty/pom.xml
+++ b/hbase-shaded-netty/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.1
+1.0.0
 ..
   
   hbase-shaded-netty
@@ -71,9 +71,6 @@
 
   io.netty
   ${rename.offset}.io.netty
-  
-
com.google.errorprone.annotations.CanIgnoreReturnValue
-  
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/c0952058/hbase-shaded-protobuf/pom.xml
--
diff --git a/hbase-shaded-protobuf/pom.xml b/hbase-shaded-protobuf/pom.xml
index f9cf3fe..38164e1 100644
--- a/hbase-shaded-protobuf/pom.xml
+++ b/hbase-shaded-protobuf/pom.xml
@@ -23,7 +23,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.1
+1.0.0
 ..
   
   hbase-shaded-protobuf
@@ -164,9 +164,6 @@
 
   com.google.protobuf
   
${rename.offset}.com.google.protobuf
-  
-
com.google.errorprone.annotations.CanIgnoreReturnValue
-  
 
   
 

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/c0952058/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 6818561..1f5f916 100644
--- a/pom.xml
+++ b/pom.xml
@@ -38,7 +38,7 @@
   
   org.apache.hbase.thirdparty
   hbase-thirdparty
-  1.0.1
+  1.0.0
   Apache HBase Third-Party Libs
   pom
   



hbase-thirdparty git commit: HBASE-18321 [hbase-thirdparty] Fix generation of META-INF/DEPENDENCIES to include dependency list and versions

2017-08-23 Thread stack
Repository: hbase-thirdparty
Updated Branches:
  refs/heads/master b37531a5f -> e07089bee


HBASE-18321 [hbase-thirdparty] Fix generation of META-INF/DEPENDENCIES to 
include dependency list and versions


Project: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/commit/e07089be
Tree: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/tree/e07089be
Diff: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/diff/e07089be

Branch: refs/heads/master
Commit: e07089bee6f51aec65de932b302894507903bd6e
Parents: b37531a
Author: Michael Stack 
Authored: Wed Aug 23 15:22:59 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 23 15:22:59 2017 -0700

--
 hbase-shaded-miscellaneous/pom.xml | 29 -
 hbase-shaded-protobuf/pom.xml  | 12 +++-
 2 files changed, 39 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/e07089be/hbase-shaded-miscellaneous/pom.xml
--
diff --git a/hbase-shaded-miscellaneous/pom.xml 
b/hbase-shaded-miscellaneous/pom.xml
index b616593..aec16f2 100644
--- a/hbase-shaded-miscellaneous/pom.xml
+++ b/hbase-shaded-miscellaneous/pom.xml
@@ -81,13 +81,16 @@
 
   
   com.google.protobuf:protobuf-java
   com.google.code.findbugs:jsr305
   
com.google.errorprone:error_prone_annotations
   com.google.j2objc:j2objc-annotations
   
org.codehaus.mojo:animal-sniffer-annotations
-  
org.codehaus.mojo:animal-sniffer-annotations
 
   
 
@@ -106,6 +109,24 @@
   com.google.guava
   guava
   22.0
+  
+
+  com.google.code.findbugs
+  jsr305
+
+
+  com.google.errorprone
+  error_prone_annotations
+
+
+  com.google.j2objc
+  j2objc-annotations
+
+
+  org.codehaus.mojo
+  animal-sniffer-annotations
+
+  
 
 
   com.google.protobuf
@@ -113,6 +134,12 @@
   
   3.3.0
+  
+
+  com.google.protobuf
+  protobuf-java
+
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/e07089be/hbase-shaded-protobuf/pom.xml
--
diff --git a/hbase-shaded-protobuf/pom.xml b/hbase-shaded-protobuf/pom.xml
index f9cf3fe..d762786 100644
--- a/hbase-shaded-protobuf/pom.xml
+++ b/hbase-shaded-protobuf/pom.xml
@@ -31,6 +31,9 @@
   
 Pulls down protobuf, patches it, compiles, and then relocates/shades.
   
+  
+3.3.1
+  
   
 
   
@@ -92,7 +95,7 @@
 
   com.google.protobuf
   protobuf-java
-  3.3.1
+  ${protobuf.version}
   sources
   jar
   true
@@ -175,4 +178,11 @@
   
 
   
+  
+
+  com.google.protobuf
+  protobuf-java
+  ${protobuf.version}
+
+  
 



[4/4] hbase git commit: HBASE-17442 Move most of the replication related classes from hbase-client to new hbase-replication package. (Guanghao Zhang).

2017-08-23 Thread appy
HBASE-17442 Move most of the replication related classes from hbase-client to 
new hbase-replication package. (Guanghao Zhang).

Change-Id: Ie0e24cc617ab4bf56de8b1747062d1b78a5d4669


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/26e6c2ce
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/26e6c2ce
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/26e6c2ce

Branch: refs/heads/master
Commit: 26e6c2ceb4db80e561ec321dba52673e7f285d1b
Parents: ae052e4
Author: Apekshit Sharma 
Authored: Thu Aug 17 20:59:35 2017 -0700
Committer: Apekshit Sharma 
Committed: Wed Aug 23 14:41:58 2017 -0700

--
 .../client/replication/ReplicationAdmin.java| 102 +---
 .../hbase/replication/ReplicationFactory.java   |  66 ---
 .../hbase/replication/ReplicationListener.java  |  51 --
 .../hbase/replication/ReplicationPeer.java  |  89 ---
 .../ReplicationPeerConfigListener.java  |  33 --
 .../replication/ReplicationPeerZKImpl.java  | 318 ---
 .../hbase/replication/ReplicationPeers.java | 177 --
 .../replication/ReplicationPeersZKImpl.java | 546 ---
 .../hbase/replication/ReplicationQueueInfo.java | 130 -
 .../hbase/replication/ReplicationQueues.java| 160 --
 .../replication/ReplicationQueuesArguments.java |  70 ---
 .../replication/ReplicationQueuesClient.java|  93 
 .../ReplicationQueuesClientArguments.java   |  40 --
 .../ReplicationQueuesClientZKImpl.java  | 175 --
 .../replication/ReplicationQueuesZKImpl.java| 407 --
 .../replication/ReplicationStateZKBase.java | 155 --
 .../hbase/replication/ReplicationTableBase.java | 441 ---
 .../hbase/replication/ReplicationTracker.java   |  49 --
 .../replication/ReplicationTrackerZKImpl.java   | 250 -
 .../TableBasedReplicationQueuesClientImpl.java  | 112 
 .../TableBasedReplicationQueuesImpl.java| 450 ---
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java   |  14 +-
 .../hadoop/hbase/zookeeper/ZNodePaths.java  |  22 +-
 hbase-replication/pom.xml   | 264 +
 .../hbase/replication/ReplicationFactory.java   |  66 +++
 .../hbase/replication/ReplicationListener.java  |  51 ++
 .../hbase/replication/ReplicationPeer.java  |  89 +++
 .../ReplicationPeerConfigListener.java  |  33 ++
 .../replication/ReplicationPeerZKImpl.java  | 318 +++
 .../hbase/replication/ReplicationPeers.java | 177 ++
 .../replication/ReplicationPeersZKImpl.java | 546 +++
 .../hbase/replication/ReplicationQueueInfo.java | 130 +
 .../hbase/replication/ReplicationQueues.java| 160 ++
 .../replication/ReplicationQueuesArguments.java |  70 +++
 .../replication/ReplicationQueuesClient.java|  93 
 .../ReplicationQueuesClientArguments.java   |  40 ++
 .../ReplicationQueuesClientZKImpl.java  | 175 ++
 .../replication/ReplicationQueuesZKImpl.java| 407 ++
 .../replication/ReplicationStateZKBase.java | 155 ++
 .../hbase/replication/ReplicationTableBase.java | 441 +++
 .../hbase/replication/ReplicationTracker.java   |  49 ++
 .../replication/ReplicationTrackerZKImpl.java   | 250 +
 .../TableBasedReplicationQueuesClientImpl.java  | 112 
 .../TableBasedReplicationQueuesImpl.java| 450 +++
 hbase-server/pom.xml|   4 +
 .../replication/BaseReplicationEndpoint.java|   2 -
 pom.xml |   6 +
 47 files changed, 4113 insertions(+), 3925 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/26e6c2ce/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 752d18c..615a79d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -26,37 +26,22 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import 

[3/4] hbase git commit: HBASE-17442 Move most of the replication related classes from hbase-client to new hbase-replication package. (Guanghao Zhang).

2017-08-23 Thread appy
http://git-wip-us.apache.org/repos/asf/hbase/blob/26e6c2ce/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
deleted file mode 100644
index 1981131..000
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-
-@InterfaceAudience.Private
-public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase 
implements
-ReplicationQueuesClient {
-
-  Log LOG = LogFactory.getLog(ReplicationQueuesClientZKImpl.class);
-
-  public ReplicationQueuesClientZKImpl(ReplicationQueuesClientArguments args) {
-this(args.getZk(), args.getConf(), args.getAbortable());
-  }
-
-  public ReplicationQueuesClientZKImpl(final ZooKeeperWatcher zk, 
Configuration conf,
-  Abortable abortable) {
-super(zk, conf, abortable);
-  }
-
-  @Override
-  public void init() throws ReplicationException {
-try {
-  if (ZKUtil.checkExists(this.zookeeper, this.queuesZNode) < 0) {
-ZKUtil.createWithParents(this.zookeeper, this.queuesZNode);
-  }
-} catch (KeeperException e) {
-  throw new ReplicationException("Internal error while initializing a 
queues client", e);
-}
-  }
-
-  @Override
-  public List getLogsInQueue(String serverName, String queueId) throws 
KeeperException {
-String znode = ZKUtil.joinZNode(this.queuesZNode, serverName);
-znode = ZKUtil.joinZNode(znode, queueId);
-List result = null;
-try {
-  result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);
-} catch (KeeperException e) {
-  this.abortable.abort("Failed to get list of wals for queueId=" + queueId
-  + " and serverName=" + serverName, e);
-  throw e;
-}
-return result;
-  }
-
-  @Override
-  public List getAllQueues(String serverName) throws KeeperException {
-String znode = ZKUtil.joinZNode(this.queuesZNode, serverName);
-List result = null;
-try {
-  result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);
-} catch (KeeperException e) {
-  this.abortable.abort("Failed to get list of queues for serverName=" + 
serverName, e);
-  throw e;
-}
-return result;
-  }
-
-  @Override
-  public Set getAllWALs() throws KeeperException {
-/**
- * Load all wals in all replication queues from ZK. This method guarantees 
to return a
- * snapshot which contains all WALs in the zookeeper at the start of this 
call even there
- * is concurrent queue failover. However, some newly created WALs during 
the call may
- * not be included.
- */
-for (int retry = 0; ; retry++) {
-  int v0 = getQueuesZNodeCversion();
-  List rss = getListOfReplicators();
-  if (rss == null || rss.isEmpty()) {
-LOG.debug("Didn't find any region server that replicates, won't 
prevent any deletions.");
-return ImmutableSet.of();
-  }
-  Set wals = Sets.newHashSet();
-  for (String rs : rss) {
-List listOfPeers = getAllQueues(rs);
-// if rs just died, this will be null
-if (listOfPeers == null) {
-  continue;
-}
-for (String id : listOfPeers) {
-  List 

[1/4] hbase git commit: HBASE-17442 Move most of the replication related classes from hbase-client to new hbase-replication package. (Guanghao Zhang).

2017-08-23 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master ae052e454 -> 26e6c2ceb


http://git-wip-us.apache.org/repos/asf/hbase/blob/26e6c2ce/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
new file mode 100644
index 000..4733706
--- /dev/null
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -0,0 +1,407 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * This class provides an implementation of the
+ * interface using ZooKeeper. The
+ * base znode that this class works at is the myQueuesZnode. The myQueuesZnode 
contains a list of
+ * all outstanding WAL files on this region server that need to be replicated. 
The myQueuesZnode is
+ * the regionserver name (a concatenation of the region server’s hostname, 
client port and start
+ * code). For example:
+ *
+ * /hbase/replication/rs/hostname.example.org,6020,1234
+ *
+ * Within this znode, the region server maintains a set of WAL replication 
queues. These queues are
+ * represented by child znodes named using there give queue id. For example:
+ *
+ * /hbase/replication/rs/hostname.example.org,6020,1234/1
+ * /hbase/replication/rs/hostname.example.org,6020,1234/2
+ *
+ * Each queue has one child znode for every WAL that still needs to be 
replicated. The value of
+ * these WAL child znodes is the latest position that has been replicated. 
This position is updated
+ * every time a WAL entry is replicated. For example:
+ *
+ * /hbase/replication/rs/hostname.example.org,6020,1234/1/23522342.23422 
[VALUE: 254]
+ */
+@InterfaceAudience.Private
+public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements 
ReplicationQueues {
+
+  /** Znode containing all replication queues for this region server. */
+  private String myQueuesZnode;
+
+  private static final Log LOG = 
LogFactory.getLog(ReplicationQueuesZKImpl.class);
+
+  public ReplicationQueuesZKImpl(ReplicationQueuesArguments args) {
+this(args.getZk(), args.getConf(), args.getAbortable());
+  }
+
+  public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf,
+  Abortable abortable) {
+super(zk, conf, abortable);
+  }
+
+  @Override
+  public void init(String serverName) throws ReplicationException {
+this.myQueuesZnode = ZKUtil.joinZNode(this.queuesZNode, serverName);
+try {
+  if (ZKUtil.checkExists(this.zookeeper, this.myQueuesZnode) < 0) {
+ZKUtil.createWithParents(this.zookeeper, this.myQueuesZnode);
+  }
+} catch (KeeperException e) {
+  throw new ReplicationException("Could not initialize replication 
queues.", e);
+}
+if (conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
+  HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) {
+  try {
+if (ZKUtil.checkExists(this.zookeeper, this.hfileRefsZNode) < 0) {
+  ZKUtil.createWithParents(this.zookeeper, 

[2/4] hbase git commit: HBASE-17442 Move most of the replication related classes from hbase-client to new hbase-replication package. (Guanghao Zhang).

2017-08-23 Thread appy
http://git-wip-us.apache.org/repos/asf/hbase/blob/26e6c2ce/hbase-replication/pom.xml
--
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
new file mode 100644
index 000..858e9fc
--- /dev/null
+++ b/hbase-replication/pom.xml
@@ -0,0 +1,264 @@
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  
+  4.0.0
+  
+hbase-build-configuration
+org.apache.hbase
+3.0.0-SNAPSHOT
+..
+  
+  hbase-replication
+  Apache HBase - Replication
+  HBase Replication Support
+
+  
+
+  
+org.apache.maven.plugins
+maven-site-plugin
+
+  true
+
+  
+  
+
+maven-assembly-plugin
+
+  true
+
+  
+  
+maven-surefire-plugin
+
+
+  
+secondPartTestsExecution
+test
+
+  test
+
+
+  true
+
+  
+
+  
+  
+  
+org.apache.maven.plugins
+maven-source-plugin
+  
+
+
+  
+
+
+  org.eclipse.m2e
+  lifecycle-mapping
+  1.0.0
+  
+
+  
+
+  
+org.apache.maven.plugins
+maven-compiler-plugin
+[3.2,)
+
+  compile
+
+  
+  
+
+  
+
+  
+
+  
+
+  
+
+  
+
+  
+
+
+  org.apache.hbase
+  hbase-annotations
+  
+
+  jdk.tools
+  jdk.tools
+
+  
+
+
+  org.apache.hbase
+  hbase-annotations
+  test-jar
+  test
+
+
+  org.apache.hbase
+  hbase-client
+
+
+  org.apache.hbase
+  hbase-common
+
+
+  org.apache.hbase
+  hbase-common
+  test-jar
+
+
+
+  commons-codec
+  commons-codec
+
+
+  commons-io
+  commons-io
+
+
+  commons-lang
+  commons-lang
+
+
+  commons-logging
+  commons-logging
+
+
+  com.google.protobuf
+  protobuf-java
+
+
+  org.apache.zookeeper
+  zookeeper
+
+
+  log4j
+  log4j
+  test
+
+  
+
+  
+
+
+  hadoop-2.0
+  
+
+
+!hadoop.profile
+
+  
+  
+
+   com.github.stephenc.findbugs
+   findbugs-annotations
+   true
+
+
+  org.apache.hadoop
+  hadoop-auth
+
+
+  org.apache.hadoop
+  hadoop-common
+  
+
+  net.java.dev.jets3t
+  jets3t
+
+
+  javax.servlet.jsp
+  jsp-api
+
+
+  org.mortbay.jetty
+  jetty
+
+
+  com.sun.jersey
+  jersey-server
+
+
+  com.sun.jersey
+  jersey-core
+
+
+  com.sun.jersey
+  jersey-json
+
+
+  javax.servlet
+  servlet-api
+
+
+  tomcat
+  jasper-compiler
+
+
+  tomcat
+  jasper-runtime
+
+
+  com.google.code.findbugs
+  jsr305
+
+  
+
+  
+
+
+
+
+  hadoop-3.0
+  
+
+  hadoop.profile
+  3.0
+
+  
+  
+3.0-SNAPSHOT
+  
+  
+
+  org.apache.hadoop
+  hadoop-auth
+
+
+  org.apache.hadoop
+  hadoop-common
+
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/26e6c2ce/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
new file mode 100644
index 000..8506cbb
--- /dev/null
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -0,0 +1,66 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for 

[hbase-thirdparty] Git Push Summary

2017-08-23 Thread stack
Repository: hbase-thirdparty
Updated Tags:  refs/tags/1.0.1RC0 [created] d218d1c13


svn commit: r21284 - in /dev/hbase/hbase-thirdparty: 1.0.0RC0/ 1.0.1RC0/

2017-08-23 Thread stack
Author: stack
Date: Wed Aug 23 22:36:28 2017
New Revision: 21284

Log:
Remove 1.0.1RC0 and add 1.0.1RC0

Added:
dev/hbase/hbase-thirdparty/1.0.1RC0/
dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz   
(with props)
dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.asc
dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.md5
dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.mds
dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.sha
Removed:
dev/hbase/hbase-thirdparty/1.0.0RC0/

Added: dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz
==
Binary file - no diff available.

Propchange: 
dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.asc
==
--- dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.asc 
(added)
+++ dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.asc 
Wed Aug 23 22:36:28 2017
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v2
+
+iQIcBAABCAAGBQJZngM/AAoJEJgWx/yKzJPSvUIP/1FNty33F+XsAx8AXYGWKQ7Z
+nWGiLqE3VuN6dgbAocEzOwbh04l8m84mbKK3c8Z3LPtvrzTvvt8jKR8N+o7KUkxd
+bbJPNofBcHGCmxLBz1i7XnIOo6vU2mwZs2pjkYOHwykiJi6bsLeKh9W7NBFk7mlQ
+2bfUO8dynHiwnh0CA9jZrGbjoPXY2j4mgnwnPHIk0rGI/cxZPmp1YNSOy9Ov2hwp
+dyq+hfmn7oM/7KscypfiaTAh+bcnvauO03eCBgAPV+ZpFBBCjfWguhptCPGDbPgk
+wn1eNH1nGV0u9mwyytAHpoUDsTHb53gRWBT5zObqIHfu5A2tvO2S4UYEErRwFUPb
+dhx0EEjrzJlG00vExsUyrvDjDl+cdgoS0UyjWQk8Ms27lN2784djJDIVa5u9vRA+
+EJuEwbbKr/RwRWTsqG6jrl6mJ4F3TBiduowcrEaJ8C3kgCHH82xWNgqerC7X0l+g
+XrOBbKss0tkERoQ9B+55l6QFR/9f3rKDDV+sxnHykr8m4NErmRU79LI1980qEuR6
+8RzWjZrIjXPsWbxqp7kV1NbfkTIzyx/cNoGWFQdKLlbbNv5ntdrI+0WDkcHd0sBy
+zVZjOCe+GwzG9ih6gDDK6vEnG/CXkIcgALz44pRlRVeQk2nSKRuwFHpECVhQ13Pp
+SbpQvD/tKEIB7wz5Sf4A
+=AK4a
+-END PGP SIGNATURE-

Added: dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.md5
==
--- dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.md5 
(added)
+++ dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.md5 
Wed Aug 23 22:36:28 2017
@@ -0,0 +1,2 @@
+hbase-thirdparty-1.0.1-src.tar.gz: FC E8 0D 75 2F AE BA 9C  AE D9 7C E9 BA 8D 
17
+   F5

Added: dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.mds
==
--- dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.mds 
(added)
+++ dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.mds 
Wed Aug 23 22:36:28 2017
@@ -0,0 +1,17 @@
+hbase-thirdparty-1.0.1-src.tar.gz:MD5 = FC E8 0D 75 2F AE BA 9C  AE D9 7C 
E9
+BA 8D 17 F5
+hbase-thirdparty-1.0.1-src.tar.gz:   SHA1 = BBD3 F0E6 0298 9D75 8DE2  67E2 052F
+9570 A46B 069F
+hbase-thirdparty-1.0.1-src.tar.gz: RMD160 = C4F3 5EA0 C764 92DB 4DA2  3F06 3761
+E43C F3B9 0F59
+hbase-thirdparty-1.0.1-src.tar.gz: SHA224 = 0BE5C57B B38B3503 E74DAC5D 4056E987
+FB24DAB4 4EFCF9E3 9F287AFF
+hbase-thirdparty-1.0.1-src.tar.gz: SHA256 = E0D784E4 8D0FFE93 22256FBD 1264B926
+E550B2EB 41BE78FD 9AF373DD 104C750B
+hbase-thirdparty-1.0.1-src.tar.gz: SHA384 = 727847B5 C40D94E3 960034BE E749502D
+8A3F6EB8 9802E3FD 30C20D00 428FE916
+B2A80F5E 20292D02 219D510D 1C98467B
+hbase-thirdparty-1.0.1-src.tar.gz: SHA512 = 2370B8E7 431382A3 F6EE3058 1EAEE3AA
+3CD428C8 7546FE56 C8792C2A 58A9CB70
+923F24C8 2D445644 FBC33931 74D7FCA5
+78DBB7FE 7417AB5D 4B0D99A8 89F44D03

Added: dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.sha
==
--- dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.sha 
(added)
+++ dev/hbase/hbase-thirdparty/1.0.1RC0/hbase-thirdparty-1.0.1-src.tar.gz.sha 
Wed Aug 23 22:36:28 2017
@@ -0,0 +1,4 @@
+hbase-thirdparty-1.0.1-src.tar.gz: 2370B8E7 431382A3 F6EE3058 1EAEE3AA 3CD428C8
+   7546FE56 C8792C2A 58A9CB70 923F24C8 2D445644
+   FBC33931 74D7FCA5 78DBB7FE 7417AB5D 4B0D99A8
+

hbase git commit: HBASE-18509 (ADDENDUM) Fix NPEs when mocking RS clocks in tests

2017-08-23 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14070.HLC 4c56a3c28 -> bc48d841b


HBASE-18509 (ADDENDUM) Fix NPEs when mocking RS clocks in tests

Change-Id: I3a302fb2a89ab950bea89a0e9e6bdba30cff5358
Signed-off-by: Apekshit Sharma 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bc48d841
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bc48d841
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bc48d841

Branch: refs/heads/HBASE-14070.HLC
Commit: bc48d841bce1dbcc31016ff55837b9f1b242e3d6
Parents: 4c56a3c
Author: Amit Patel 
Authored: Wed Aug 23 13:00:06 2017 -0700
Committer: Apekshit Sharma 
Committed: Wed Aug 23 15:07:07 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/regionserver/Clocks.java | 5 +
 .../hadoop/hbase/regionserver/TestHRegionReplayEvents.java | 2 +-
 .../hadoop/hbase/regionserver/TestRegionSplitPolicy.java   | 6 ++
 .../org/apache/hadoop/hbase/regionserver/TestWALLockup.java| 2 +-
 .../hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java   | 5 -
 5 files changed, 13 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bc48d841/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Clocks.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Clocks.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Clocks.java
index a401007..ba4d13a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Clocks.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Clocks.java
@@ -51,6 +51,11 @@ public class Clocks {
 clockTypes.add(hybridLogicalClock.getClockType());
   }
 
+  @VisibleForTesting
+  public Clocks() {
+this(Clock.DEFAULT_MAX_CLOCK_SKEW_IN_MS);
+  }
+
   public long update(ClockType clockType, long timestamp) {
 return getClock(clockType).update(timestamp);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc48d841/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 54eaa57..ac7d28e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -173,7 +173,7 @@ public class TestHRegionReplayEvents {
 when(rss.getServerName()).thenReturn(ServerName.valueOf("foo", 1, 1));
 when(rss.getConfiguration()).thenReturn(CONF);
 when(rss.getRegionServerAccounting()).thenReturn(new 
RegionServerAccounting(CONF));
-when(rss.getClocks().getClock(any())).thenReturn(new SystemClock());
+when(rss.getClocks()).thenReturn(new Clocks());
 String string = 
org.apache.hadoop.hbase.executor.EventType.RS_COMPACTED_FILES_DISCHARGER
 .toString();
 ExecutorService es = new ExecutorService(string);

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc48d841/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
index ecc2d8d..811c0a2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
@@ -27,11 +27,9 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.SystemClock;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -108,7 +106,7 @@ public class TestRegionSplitPolicy {
 final List regions = new ArrayList<>();
 Mockito.when(rss.getOnlineRegions(TABLENAME)).thenReturn(regions);
 Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss);
-Mockito.when(rss.getClocks().getClock(ClockType.SYSTEM)).thenReturn(new 
SystemClock());
+

hbase git commit: Add Huaxaing Sun to pom.xml

2017-08-23 Thread huaxiangsun
Repository: hbase
Updated Branches:
  refs/heads/master 26e6c2ceb -> 1b8509cca


Add Huaxaing Sun to pom.xml


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1b8509cc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1b8509cc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1b8509cc

Branch: refs/heads/master
Commit: 1b8509cca94e3f264251f1fb74398ebf2ff01551
Parents: 26e6c2c
Author: Huaxiang Sun 
Authored: Wed Aug 23 15:13:05 2017 -0700
Committer: Huaxiang Sun 
Committed: Wed Aug 23 15:13:05 2017 -0700

--
 pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1b8509cc/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 4d10324..8ee9168 100755
--- a/pom.xml
+++ b/pom.xml
@@ -301,6 +301,12 @@
   -8
 
 
+  huaxiangsun
+  Huaxiang Sun
+  huaxiang...@apache.org
+  -8
+
+
   jdcryans
   Jean-Daniel Cryans
   jdcry...@apache.org



[1/2] hbase git commit: HBASE-18628 Fix event pre-emption in ZKPermWatcher

2017-08-23 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 19a80c823 -> 362a2924d
  refs/heads/branch-1.4 40dedb8df -> e293da211


HBASE-18628 Fix event pre-emption in ZKPermWatcher

Instead of using an Atomic Reference to data and aborting when we detect
that new data comes in, use the native cancellation/pre-emption features
of Java Future.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e293da21
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e293da21
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e293da21

Branch: refs/heads/branch-1.4
Commit: e293da2114cea79326eb799def72d05aee1753f2
Parents: 40dedb8
Author: Mike Drob 
Authored: Mon Aug 21 16:23:27 2017 -0500
Committer: Andrew Purtell 
Committed: Wed Aug 23 16:42:28 2017 -0700

--
 .../security/access/ZKPermissionWatcher.java| 60 ++--
 1 file changed, 30 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e293da21/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
index b4bf510..4c37b52 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
@@ -38,6 +38,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -55,12 +56,11 @@ public class ZKPermissionWatcher extends ZooKeeperListener 
implements Closeable
   private static final Log LOG = LogFactory.getLog(ZKPermissionWatcher.class);
   // parent node for permissions lists
   static final String ACL_NODE = "acl";
-  TableAuthManager authManager;
-  String aclZNode;
-  CountDownLatch initialized = new CountDownLatch(1);
-  AtomicReference nodes =
-  new AtomicReference(null);
-  ExecutorService executor;
+  private final TableAuthManager authManager;
+  private final String aclZNode;
+  private final CountDownLatch initialized = new CountDownLatch(1);
+  private final ExecutorService executor;
+  private Future childrenChangedFuture;
 
   public ZKPermissionWatcher(ZooKeeperWatcher watcher,
   TableAuthManager authManager, Configuration conf) {
@@ -83,7 +83,7 @@ public class ZKPermissionWatcher extends ZooKeeperListener 
implements Closeable
   List existing =
   ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode);
   if (existing != null) {
-refreshNodes(existing, null);
+refreshNodes(existing);
   }
   return null;
 }
@@ -127,7 +127,7 @@ public class ZKPermissionWatcher extends ZooKeeperListener 
implements Closeable
   try {
 List nodes =
 ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode);
-refreshNodes(nodes, null);
+refreshNodes(nodes);
   } catch (KeeperException ke) {
 LOG.error("Error reading data from zookeeper", ke);
 // only option is to abort
@@ -185,37 +185,36 @@ public class ZKPermissionWatcher extends 
ZooKeeperListener implements Closeable
 waitUntilStarted();
 if (path.equals(aclZNode)) {
   try {
-List nodeList =
+final List nodeList =
 ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode);
-while (!nodes.compareAndSet(null, nodeList)) {
-  try {
-Thread.sleep(20);
-  } catch (InterruptedException e) {
-LOG.warn("Interrupted while setting node list", e);
-Thread.currentThread().interrupt();
+// preempt any existing nodeChildrenChanged event processing
+if (childrenChangedFuture != null && !childrenChangedFuture.isDone()) {
+  boolean cancelled = childrenChangedFuture.cancel(true);
+  if (!cancelled) {
+// task may have finished between our check and attempted cancel, 
this is fine.
+if (! childrenChangedFuture.isDone()) {
+  LOG.warn("Could not cancel processing node children changed 
event, " +
+  "please file a JIRA and attach logs if possible.");
+}
  

[2/2] hbase git commit: HBASE-18628 Fix event pre-emption in ZKPermWatcher

2017-08-23 Thread apurtell
HBASE-18628 Fix event pre-emption in ZKPermWatcher

Instead of using an Atomic Reference to data and aborting when we detect
that new data comes in, use the native cancellation/pre-emption features
of Java Future.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/362a2924
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/362a2924
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/362a2924

Branch: refs/heads/branch-1
Commit: 362a2924d2a61630b5644a2776cf8a6a96b03954
Parents: 19a80c8
Author: Mike Drob 
Authored: Mon Aug 21 16:23:27 2017 -0500
Committer: Andrew Purtell 
Committed: Wed Aug 23 16:42:33 2017 -0700

--
 .../security/access/ZKPermissionWatcher.java| 60 ++--
 1 file changed, 30 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/362a2924/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
index b4bf510..4c37b52 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
@@ -38,6 +38,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -55,12 +56,11 @@ public class ZKPermissionWatcher extends ZooKeeperListener 
implements Closeable
   private static final Log LOG = LogFactory.getLog(ZKPermissionWatcher.class);
   // parent node for permissions lists
   static final String ACL_NODE = "acl";
-  TableAuthManager authManager;
-  String aclZNode;
-  CountDownLatch initialized = new CountDownLatch(1);
-  AtomicReference nodes =
-  new AtomicReference(null);
-  ExecutorService executor;
+  private final TableAuthManager authManager;
+  private final String aclZNode;
+  private final CountDownLatch initialized = new CountDownLatch(1);
+  private final ExecutorService executor;
+  private Future childrenChangedFuture;
 
   public ZKPermissionWatcher(ZooKeeperWatcher watcher,
   TableAuthManager authManager, Configuration conf) {
@@ -83,7 +83,7 @@ public class ZKPermissionWatcher extends ZooKeeperListener 
implements Closeable
   List existing =
   ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode);
   if (existing != null) {
-refreshNodes(existing, null);
+refreshNodes(existing);
   }
   return null;
 }
@@ -127,7 +127,7 @@ public class ZKPermissionWatcher extends ZooKeeperListener 
implements Closeable
   try {
 List nodes =
 ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode);
-refreshNodes(nodes, null);
+refreshNodes(nodes);
   } catch (KeeperException ke) {
 LOG.error("Error reading data from zookeeper", ke);
 // only option is to abort
@@ -185,37 +185,36 @@ public class ZKPermissionWatcher extends 
ZooKeeperListener implements Closeable
 waitUntilStarted();
 if (path.equals(aclZNode)) {
   try {
-List nodeList =
+final List nodeList =
 ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode);
-while (!nodes.compareAndSet(null, nodeList)) {
-  try {
-Thread.sleep(20);
-  } catch (InterruptedException e) {
-LOG.warn("Interrupted while setting node list", e);
-Thread.currentThread().interrupt();
+// preempt any existing nodeChildrenChanged event processing
+if (childrenChangedFuture != null && !childrenChangedFuture.isDone()) {
+  boolean cancelled = childrenChangedFuture.cancel(true);
+  if (!cancelled) {
+// task may have finished between our check and attempted cancel, 
this is fine.
+if (! childrenChangedFuture.isDone()) {
+  LOG.warn("Could not cancel processing node children changed 
event, " +
+  "please file a JIRA and attach logs if possible.");
+}
   }
 }
+childrenChangedFuture = asyncProcessNodeUpdate(new Runnable() {
+  @Override
+  public 

hbase git commit: HBASE-17442 (Addendum) Fix build errors due to wrong relativePath for parent pom.

2017-08-23 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master 1b8509cca -> ae3b51a7a


HBASE-17442 (Addendum) Fix build errors due to wrong relativePath for parent 
pom.

Change-Id: I0796dcb08cee64089289b6e315b99ad623edbbeb


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ae3b51a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ae3b51a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ae3b51a7

Branch: refs/heads/master
Commit: ae3b51a7a73a5fbe216451f087a3fefa596a7d54
Parents: 1b8509c
Author: Apekshit Sharma 
Authored: Wed Aug 23 16:12:02 2017 -0700
Committer: Apekshit Sharma 
Committed: Wed Aug 23 16:14:23 2017 -0700

--
 hbase-replication/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ae3b51a7/hbase-replication/pom.xml
--
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
index 858e9fc..87664f8 100644
--- a/hbase-replication/pom.xml
+++ b/hbase-replication/pom.xml
@@ -25,7 +25,7 @@
 hbase-build-configuration
 org.apache.hbase
 3.0.0-SNAPSHOT
-..
+../hbase-build-configuration
   
   hbase-replication
   Apache HBase - Replication



[2/2] hbase git commit: HBASE-18616 Shell warns about already initialized constants at startup

2017-08-23 Thread apurtell
HBASE-18616 Shell warns about already initialized constants at startup

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f73b7628
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f73b7628
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f73b7628

Branch: refs/heads/branch-1.4
Commit: f73b7628727874bdc8f997584c195245b791ae3b
Parents: e293da2
Author: Guangxu Cheng 
Authored: Thu Aug 17 09:55:26 2017 +0800
Committer: Andrew Purtell 
Committed: Wed Aug 23 18:08:27 2017 -0700

--
 hbase-shell/src/main/ruby/hbase.rb | 5 -
 1 file changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f73b7628/hbase-shell/src/main/ruby/hbase.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase.rb 
b/hbase-shell/src/main/ruby/hbase.rb
index 88a6f04..0fa1649 100644
--- a/hbase-shell/src/main/ruby/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase.rb
@@ -79,12 +79,7 @@ module HBaseConstants
   TYPE = 'TYPE'
   NONE = 'NONE'
   VALUE = 'VALUE'
-  ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME'
-  CLUSTER_KEY = 'CLUSTER_KEY'
-  TABLE_CFS = 'TABLE_CFS'
   NAMESPACES = 'NAMESPACES'
-  CONFIG = 'CONFIG'
-  DATA = 'DATA'
   SERVER_NAME = 'SERVER_NAME'
   LOCALITY_THRESHOLD = 'LOCALITY_THRESHOLD'
   RESTORE_ACL = 'RESTORE_ACL'



[1/2] hbase git commit: HBASE-18616 Shell warns about already initialized constants at startup

2017-08-23 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 362a2924d -> 5015913ce
  refs/heads/branch-1.4 e293da211 -> f73b76287


HBASE-18616 Shell warns about already initialized constants at startup

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5015913c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5015913c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5015913c

Branch: refs/heads/branch-1
Commit: 5015913cee9ec4ec8347bdaf9d8771f700f792be
Parents: 362a292
Author: Guangxu Cheng 
Authored: Thu Aug 17 09:55:26 2017 +0800
Committer: Andrew Purtell 
Committed: Wed Aug 23 18:08:01 2017 -0700

--
 hbase-shell/src/main/ruby/hbase.rb | 5 -
 1 file changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5015913c/hbase-shell/src/main/ruby/hbase.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase.rb 
b/hbase-shell/src/main/ruby/hbase.rb
index 88a6f04..0fa1649 100644
--- a/hbase-shell/src/main/ruby/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase.rb
@@ -79,12 +79,7 @@ module HBaseConstants
   TYPE = 'TYPE'
   NONE = 'NONE'
   VALUE = 'VALUE'
-  ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME'
-  CLUSTER_KEY = 'CLUSTER_KEY'
-  TABLE_CFS = 'TABLE_CFS'
   NAMESPACES = 'NAMESPACES'
-  CONFIG = 'CONFIG'
-  DATA = 'DATA'
   SERVER_NAME = 'SERVER_NAME'
   LOCALITY_THRESHOLD = 'LOCALITY_THRESHOLD'
   RESTORE_ACL = 'RESTORE_ACL'



[2/4] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/b0334863/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index fce4eaa..979a351 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -33,9 +33,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@@ -89,7 +89,7 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
   protected final SnapshotManifest snapshotManifest;
   protected final SnapshotManager snapshotManager;
 
-  protected HTableDescriptor htd;
+  protected TableDescriptor htd;
 
   /**
* @param snapshot descriptor of the snapshot to take
@@ -124,12 +124,12 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
   "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
   }
 
-  private HTableDescriptor loadTableDescriptor()
+  private TableDescriptor loadTableDescriptor()
   throws FileNotFoundException, IOException {
-HTableDescriptor htd =
+TableDescriptor htd =
   this.master.getTableDescriptors().get(snapshotTable);
 if (htd == null) {
-  throw new IOException("HTableDescriptor missing for " + snapshotTable);
+  throw new IOException("TableDescriptor missing for " + snapshotTable);
 }
 return htd;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b0334863/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
index d4a54bb..b1d1415 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
@@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -59,7 +59,7 @@ public class ExpiredMobFileCleaner extends Configured 
implements Tool {
* @param tableName The current table name.
* @param family The current family.
*/
-  public void cleanExpiredMobFiles(String tableName, HColumnDescriptor family) 
throws IOException {
+  public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor 
family) throws IOException {
 Configuration conf = getConf();
 TableName tn = TableName.valueOf(tableName);
 FileSystem fs = FileSystem.get(conf);
@@ -98,8 +98,8 @@ public class ExpiredMobFileCleaner extends Configured 
implements Tool {
 Connection connection = ConnectionFactory.createConnection(getConf());
 Admin admin = connection.getAdmin();
 try {
-  HTableDescriptor htd = admin.getTableDescriptor(tn);
-  HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
+  TableDescriptor htd = admin.listTableDescriptor(tn);
+  ColumnFamilyDescriptor family = 
htd.getColumnFamily(Bytes.toBytes(familyName));
   if (family == null || !family.isMobEnabled()) {
 throw new IOException("Column family " + familyName + " is not a MOB 
column family");
   }


[1/4] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master ae3b51a7a -> b03348630


http://git-wip-us.apache.org/repos/asf/hbase/blob/b0334863/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 18b1114..28d2a24 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
@@ -466,10 +467,20 @@ public class HBaseTestingUtility extends 
HBaseCommonTestingUtility {
 
   /**
* @return META table descriptor
+   * @deprecated since 2.0 version and will be removed in 3.0 version.
+   * use {@link #getMetaDescriptor()}
*/
+  @Deprecated
   public HTableDescriptor getMetaTableDescriptor() {
+return new 
ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
+  }
+
+  /**
+   * @return META table descriptor
+   */
+  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
 try {
-  return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
+  return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
 } catch (IOException e) {
   throw new RuntimeException("Unable to create META table descriptor", e);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b0334863/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index 7457f43..95997f2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -25,10 +25,13 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.junit.*;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
@@ -46,9 +49,9 @@ public class TestFSTableDescriptorForceCreation {
 FileSystem fs = FileSystem.get(UTIL.getConfiguration());
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
 
-assertTrue("Should create new table descriptor", 
fstd.createTableDescriptor(htd, false));
+assertTrue("Should create new table descriptor",
+  
fstd.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(),
 false));
   }
 
   @Test
@@ -59,7 +62,7 @@ public class TestFSTableDescriptorForceCreation {
 // Cleanup old tests if any detritus laying around.
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+TableDescriptor htd = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
 fstd.add(htd);
 assertFalse("Should not create new table descriptor", 
fstd.createTableDescriptor(htd, false));
   }
@@ -71,7 +74,7 @@ public class TestFSTableDescriptorForceCreation {
 FileSystem fs = FileSystem.get(UTIL.getConfiguration());
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+TableDescriptor htd = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
 fstd.createTableDescriptor(htd, false);
 assertTrue("Should create new table descriptor",
 fstd.createTableDescriptor(htd, true));


[4/4] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
HBASE-18503 Change ***Util and Master to use TableDescriptor and 
ColumnFamilyDescriptor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b0334863
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b0334863
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b0334863

Branch: refs/heads/master
Commit: b03348630c145aa6cc29f0f295442c6deb28a28e
Parents: ae3b51a
Author: Chia-Ping Tsai 
Authored: Thu Aug 24 12:56:58 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Aug 24 12:56:58 2017 +0800

--
 .../hadoop/hbase/backup/util/BackupUtils.java   |   4 +-
 .../hadoop/hbase/backup/util/RestoreTool.java   |  48 ++--
 .../apache/hadoop/hbase/HColumnDescriptor.java  |  11 +-
 .../apache/hadoop/hbase/HTableDescriptor.java   |  11 +-
 .../client/ColumnFamilyDescriptorBuilder.java   |  13 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  41 +--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |   2 +-
 .../hbase/client/TableDescriptorBuilder.java|  20 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java | 101 +---
 .../hbase/shaded/protobuf/RequestConverter.java |  18 +-
 .../apache/hadoop/hbase/TableDescriptors.java   |  15 +-
 .../hbase/client/ClientSideRegionScanner.java   |   3 +-
 .../hbase/client/TableSnapshotScanner.java  |   3 +-
 .../mapreduce/TableSnapshotInputFormatImpl.java |  18 +-
 .../hadoop/hbase/master/CatalogJanitor.java |  13 +-
 .../master/ExpiredMobFileCleanerChore.java  |  10 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  86 +++
 .../hadoop/hbase/master/MasterFileSystem.java   |  24 +-
 .../hbase/master/MasterMobCompactionThread.java |  10 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  25 +-
 .../hadoop/hbase/master/MasterServices.java |  15 +-
 .../hadoop/hbase/master/MobCompactionChore.java |  10 +-
 .../hadoop/hbase/master/TableStateManager.java  |   6 +-
 .../assignment/MergeTableRegionsProcedure.java  |  10 +-
 .../master/assignment/RegionStateStore.java |  12 +-
 .../master/balancer/RegionLocationFinder.java   |  12 +-
 .../master/cleaner/ReplicationMetaCleaner.java  |  10 +-
 .../procedure/AddColumnFamilyProcedure.java |  50 ++--
 .../procedure/CloneSnapshotProcedure.java   |  51 ++--
 .../master/procedure/CreateTableProcedure.java  |  66 ++---
 .../procedure/DeleteColumnFamilyProcedure.java  |  37 +--
 .../procedure/ModifyColumnFamilyProcedure.java  |  43 ++--
 .../master/procedure/ModifyTableProcedure.java  |  75 +++---
 .../procedure/RestoreSnapshotProcedure.java |  34 +--
 .../procedure/TruncateTableProcedure.java   |  22 +-
 .../master/snapshot/MasterSnapshotVerifier.java |   8 +-
 .../hbase/master/snapshot/SnapshotManager.java  |  29 ++-
 .../master/snapshot/TakeSnapshotHandler.java|  10 +-
 .../hadoop/hbase/mob/ExpiredMobFileCleaner.java |  10 +-
 .../org/apache/hadoop/hbase/mob/MobUtils.java   |  27 +-
 .../hbase/mob/compactions/MobCompactor.java |   6 +-
 .../compactions/PartitionedMobCompactor.java|   4 +-
 .../hbase/regionserver/CompactionTool.java  |  16 +-
 .../hbase/regionserver/HRegionFileSystem.java   |   8 +-
 .../hbase/regionserver/HRegionServer.java   |   8 +-
 .../hbase/regionserver/RSRpcServices.java   |   8 +-
 .../regionserver/handler/OpenMetaHandler.java   |   6 +-
 .../handler/OpenPriorityRegionHandler.java  |   5 +-
 .../regionserver/handler/OpenRegionHandler.java |  10 +-
 .../RegionReplicaReplicationEndpoint.java   |   4 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |  14 +-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |  18 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   | 258 ++-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  68 ++---
 .../hadoop/hbase/util/HBaseFsckRepair.java  |   4 +-
 .../hadoop/hbase/util/ModifyRegionUtils.java|  33 ++-
 .../hadoop/hbase/HBaseTestingUtility.java   |  13 +-
 .../TestFSTableDescriptorForceCreation.java |  13 +-
 .../TestHColumnDescriptorDefaultVersions.java   |  12 +-
 .../hbase/client/TestAsyncTableAdminApi.java|   5 +-
 .../hbase/master/MockNoopMasterServices.java|  13 +-
 .../master/assignment/MockMasterServices.java   |  24 +-
 .../MasterProcedureTestingUtility.java  |  40 +--
 .../procedure/TestCreateTableProcedure.java |  33 ++-
 .../TestMasterFailoverWithProcedures.java   |   4 +-
 .../procedure/TestMasterProcedureWalLease.java  |   4 +-
 ...stTableDescriptorModificationFromClient.java |   7 +-
 .../TestPartitionedMobCompactor.java|   3 +-
 .../regionserver/TestGetClosestAtOrBefore.java  |   6 +-
 .../TestRegionMergeTransactionOnCluster.java|  14 +-
 .../regionserver/TestRegionServerNoMaster.java  |   4 +-
 .../hbase/security/access/SecureTestUtil.java   |  21 +-
 .../hbase/snapshot/MobSnapshotTestingUtils.java |  65 ++---
 

[3/4] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/b0334863/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
index 42a5445..476c65c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
@@ -24,11 +24,11 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
@@ -55,8 +55,8 @@ public class MobCompactionChore extends ScheduledChore {
   protected void chore() {
 try {
   TableDescriptors htds = master.getTableDescriptors();
-  Map map = htds.getAll();
-  for (HTableDescriptor htd : map.values()) {
+  Map map = htds.getAll();
+  for (TableDescriptor htd : map.values()) {
 if (!master.getTableStateManager().isTableState(htd.getTableName(),
   TableState.State.ENABLED)) {
   continue;
@@ -66,7 +66,7 @@ public class MobCompactionChore extends ScheduledChore {
   final LockManager.MasterLock lock = 
master.getLockManager().createMasterLock(
   MobUtils.getTableLockName(htd.getTableName()), 
LockProcedure.LockType.EXCLUSIVE,
   this.getClass().getName() + ": mob compaction");
-  for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
 if (!hcd.isMobEnabled()) {
   continue;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b0334863/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index 18f6856..fb83971 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -24,12 +24,12 @@ import java.util.Set;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import edu.umd.cs.findbugs.annotations.NonNull;
 import edu.umd.cs.findbugs.annotations.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
@@ -198,7 +198,7 @@ public class TableStateManager {
 
   public static void fixTableStates(TableDescriptors tableDescriptors, 
Connection connection)
   throws IOException {
-final Map allDescriptors =
+final Map allDescriptors =
 tableDescriptors.getAllDescriptors();
 final Map states = new HashMap<>();
 MetaTableAccessor.fullScanTables(connection, new 
MetaTableAccessor.Visitor() {
@@ -210,7 +210,7 @@ public class TableStateManager {
 return true;
   }
 });
-for (Map.Entry entry : 
allDescriptors.entrySet()) {
+for (Map.Entry entry : allDescriptors.entrySet()) 
{
   String table = entry.getKey();
   if (table.equals(TableName.META_TABLE_NAME.getNameAsString()))
 continue;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b0334863/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 9aaf297..c398c9a 100644
--- 

[4/4] hbase git commit: HBASE-17442 Move most of the replication related classes from hbase-client to new hbase-replication package. (Guanghao Zhang).

2017-08-23 Thread appy
HBASE-17442 Move most of the replication related classes from hbase-client to 
new hbase-replication package. (Guanghao Zhang).

Change-Id: Ie0e24cc617ab4bf56de8b1747062d1b78a5d4669


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e2ce252b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e2ce252b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e2ce252b

Branch: refs/heads/branch-2
Commit: e2ce252b5921c2ac8c44548883e1b1c31cb536f2
Parents: 205016c
Author: Apekshit Sharma 
Authored: Thu Aug 17 20:59:35 2017 -0700
Committer: Apekshit Sharma 
Committed: Wed Aug 23 22:18:26 2017 -0700

--
 .../client/replication/ReplicationAdmin.java| 102 +---
 .../hbase/replication/ReplicationFactory.java   |  66 ---
 .../hbase/replication/ReplicationListener.java  |  51 --
 .../hbase/replication/ReplicationPeer.java  |  89 ---
 .../ReplicationPeerConfigListener.java  |  33 --
 .../replication/ReplicationPeerZKImpl.java  | 318 ---
 .../hbase/replication/ReplicationPeers.java | 177 --
 .../replication/ReplicationPeersZKImpl.java | 546 ---
 .../hbase/replication/ReplicationQueueInfo.java | 130 -
 .../hbase/replication/ReplicationQueues.java| 160 --
 .../replication/ReplicationQueuesArguments.java |  70 ---
 .../replication/ReplicationQueuesClient.java|  93 
 .../ReplicationQueuesClientArguments.java   |  40 --
 .../ReplicationQueuesClientZKImpl.java  | 175 --
 .../replication/ReplicationQueuesZKImpl.java| 407 --
 .../replication/ReplicationStateZKBase.java | 155 --
 .../hbase/replication/ReplicationTableBase.java | 441 ---
 .../hbase/replication/ReplicationTracker.java   |  49 --
 .../replication/ReplicationTrackerZKImpl.java   | 250 -
 .../TableBasedReplicationQueuesClientImpl.java  | 112 
 .../TableBasedReplicationQueuesImpl.java| 450 ---
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java   |  14 +-
 .../hadoop/hbase/zookeeper/ZNodePaths.java  |  22 +-
 hbase-replication/pom.xml   | 264 +
 .../hbase/replication/ReplicationFactory.java   |  66 +++
 .../hbase/replication/ReplicationListener.java  |  51 ++
 .../hbase/replication/ReplicationPeer.java  |  89 +++
 .../ReplicationPeerConfigListener.java  |  33 ++
 .../replication/ReplicationPeerZKImpl.java  | 318 +++
 .../hbase/replication/ReplicationPeers.java | 177 ++
 .../replication/ReplicationPeersZKImpl.java | 546 +++
 .../hbase/replication/ReplicationQueueInfo.java | 130 +
 .../hbase/replication/ReplicationQueues.java| 160 ++
 .../replication/ReplicationQueuesArguments.java |  70 +++
 .../replication/ReplicationQueuesClient.java|  93 
 .../ReplicationQueuesClientArguments.java   |  40 ++
 .../ReplicationQueuesClientZKImpl.java  | 175 ++
 .../replication/ReplicationQueuesZKImpl.java| 407 ++
 .../replication/ReplicationStateZKBase.java | 155 ++
 .../hbase/replication/ReplicationTableBase.java | 441 +++
 .../hbase/replication/ReplicationTracker.java   |  49 ++
 .../replication/ReplicationTrackerZKImpl.java   | 250 +
 .../TableBasedReplicationQueuesClientImpl.java  | 112 
 .../TableBasedReplicationQueuesImpl.java| 450 +++
 hbase-server/pom.xml|   4 +
 .../replication/BaseReplicationEndpoint.java|   2 -
 pom.xml |   6 +
 47 files changed, 4113 insertions(+), 3925 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e2ce252b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 752d18c..615a79d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -26,37 +26,22 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import 

[1/4] hbase git commit: HBASE-17442 Move most of the replication related classes from hbase-client to new hbase-replication package. (Guanghao Zhang).

2017-08-23 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2 205016ca7 -> e2ce252b5


http://git-wip-us.apache.org/repos/asf/hbase/blob/e2ce252b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
new file mode 100644
index 000..4733706
--- /dev/null
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -0,0 +1,407 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * This class provides an implementation of the
+ * interface using ZooKeeper. The
+ * base znode that this class works at is the myQueuesZnode. The myQueuesZnode 
contains a list of
+ * all outstanding WAL files on this region server that need to be replicated. 
The myQueuesZnode is
+ * the regionserver name (a concatenation of the region server’s hostname, 
client port and start
+ * code). For example:
+ *
+ * /hbase/replication/rs/hostname.example.org,6020,1234
+ *
+ * Within this znode, the region server maintains a set of WAL replication 
queues. These queues are
+ * represented by child znodes named using there give queue id. For example:
+ *
+ * /hbase/replication/rs/hostname.example.org,6020,1234/1
+ * /hbase/replication/rs/hostname.example.org,6020,1234/2
+ *
+ * Each queue has one child znode for every WAL that still needs to be 
replicated. The value of
+ * these WAL child znodes is the latest position that has been replicated. 
This position is updated
+ * every time a WAL entry is replicated. For example:
+ *
+ * /hbase/replication/rs/hostname.example.org,6020,1234/1/23522342.23422 
[VALUE: 254]
+ */
+@InterfaceAudience.Private
+public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements 
ReplicationQueues {
+
+  /** Znode containing all replication queues for this region server. */
+  private String myQueuesZnode;
+
+  private static final Log LOG = 
LogFactory.getLog(ReplicationQueuesZKImpl.class);
+
+  public ReplicationQueuesZKImpl(ReplicationQueuesArguments args) {
+this(args.getZk(), args.getConf(), args.getAbortable());
+  }
+
+  public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf,
+  Abortable abortable) {
+super(zk, conf, abortable);
+  }
+
+  @Override
+  public void init(String serverName) throws ReplicationException {
+this.myQueuesZnode = ZKUtil.joinZNode(this.queuesZNode, serverName);
+try {
+  if (ZKUtil.checkExists(this.zookeeper, this.myQueuesZnode) < 0) {
+ZKUtil.createWithParents(this.zookeeper, this.myQueuesZnode);
+  }
+} catch (KeeperException e) {
+  throw new ReplicationException("Could not initialize replication 
queues.", e);
+}
+if (conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
+  HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) {
+  try {
+if (ZKUtil.checkExists(this.zookeeper, this.hfileRefsZNode) < 0) {
+  ZKUtil.createWithParents(this.zookeeper, 

[2/4] hbase git commit: HBASE-17442 Move most of the replication related classes from hbase-client to new hbase-replication package. (Guanghao Zhang).

2017-08-23 Thread appy
http://git-wip-us.apache.org/repos/asf/hbase/blob/e2ce252b/hbase-replication/pom.xml
--
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
new file mode 100644
index 000..c4db874
--- /dev/null
+++ b/hbase-replication/pom.xml
@@ -0,0 +1,264 @@
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+  
+  4.0.0
+  
+hbase
+org.apache.hbase
+2.0.0-alpha3-SNAPSHOT
+..
+  
+  hbase-replication
+  Apache HBase - Replication
+  HBase Replication Support
+
+  
+
+  
+org.apache.maven.plugins
+maven-site-plugin
+
+  true
+
+  
+  
+
+maven-assembly-plugin
+
+  true
+
+  
+  
+maven-surefire-plugin
+
+
+  
+secondPartTestsExecution
+test
+
+  test
+
+
+  true
+
+  
+
+  
+  
+  
+org.apache.maven.plugins
+maven-source-plugin
+  
+
+
+  
+
+
+  org.eclipse.m2e
+  lifecycle-mapping
+  1.0.0
+  
+
+  
+
+  
+org.apache.maven.plugins
+maven-compiler-plugin
+[3.2,)
+
+  compile
+
+  
+  
+
+  
+
+  
+
+  
+
+  
+
+  
+
+  
+
+
+  org.apache.hbase
+  hbase-annotations
+  
+
+  jdk.tools
+  jdk.tools
+
+  
+
+
+  org.apache.hbase
+  hbase-annotations
+  test-jar
+  test
+
+
+  org.apache.hbase
+  hbase-client
+
+
+  org.apache.hbase
+  hbase-common
+
+
+  org.apache.hbase
+  hbase-common
+  test-jar
+
+
+
+  commons-codec
+  commons-codec
+
+
+  commons-io
+  commons-io
+
+
+  commons-lang
+  commons-lang
+
+
+  commons-logging
+  commons-logging
+
+
+  com.google.protobuf
+  protobuf-java
+
+
+  org.apache.zookeeper
+  zookeeper
+
+
+  log4j
+  log4j
+  test
+
+  
+
+  
+
+
+  hadoop-2.0
+  
+
+
+!hadoop.profile
+
+  
+  
+
+   com.github.stephenc.findbugs
+   findbugs-annotations
+   true
+
+
+  org.apache.hadoop
+  hadoop-auth
+
+
+  org.apache.hadoop
+  hadoop-common
+  
+
+  net.java.dev.jets3t
+  jets3t
+
+
+  javax.servlet.jsp
+  jsp-api
+
+
+  org.mortbay.jetty
+  jetty
+
+
+  com.sun.jersey
+  jersey-server
+
+
+  com.sun.jersey
+  jersey-core
+
+
+  com.sun.jersey
+  jersey-json
+
+
+  javax.servlet
+  servlet-api
+
+
+  tomcat
+  jasper-compiler
+
+
+  tomcat
+  jasper-runtime
+
+
+  com.google.code.findbugs
+  jsr305
+
+  
+
+  
+
+
+
+
+  hadoop-3.0
+  
+
+  hadoop.profile
+  3.0
+
+  
+  
+3.0-SNAPSHOT
+  
+  
+
+  org.apache.hadoop
+  hadoop-auth
+
+
+  org.apache.hadoop
+  hadoop-common
+
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/e2ce252b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
new file mode 100644
index 000..8506cbb
--- /dev/null
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -0,0 +1,66 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional 

[3/4] hbase git commit: HBASE-17442 Move most of the replication related classes from hbase-client to new hbase-replication package. (Guanghao Zhang).

2017-08-23 Thread appy
http://git-wip-us.apache.org/repos/asf/hbase/blob/e2ce252b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
deleted file mode 100644
index 1981131..000
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-
-@InterfaceAudience.Private
-public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase 
implements
-ReplicationQueuesClient {
-
-  Log LOG = LogFactory.getLog(ReplicationQueuesClientZKImpl.class);
-
-  public ReplicationQueuesClientZKImpl(ReplicationQueuesClientArguments args) {
-this(args.getZk(), args.getConf(), args.getAbortable());
-  }
-
-  public ReplicationQueuesClientZKImpl(final ZooKeeperWatcher zk, 
Configuration conf,
-  Abortable abortable) {
-super(zk, conf, abortable);
-  }
-
-  @Override
-  public void init() throws ReplicationException {
-try {
-  if (ZKUtil.checkExists(this.zookeeper, this.queuesZNode) < 0) {
-ZKUtil.createWithParents(this.zookeeper, this.queuesZNode);
-  }
-} catch (KeeperException e) {
-  throw new ReplicationException("Internal error while initializing a 
queues client", e);
-}
-  }
-
-  @Override
-  public List getLogsInQueue(String serverName, String queueId) throws 
KeeperException {
-String znode = ZKUtil.joinZNode(this.queuesZNode, serverName);
-znode = ZKUtil.joinZNode(znode, queueId);
-List result = null;
-try {
-  result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);
-} catch (KeeperException e) {
-  this.abortable.abort("Failed to get list of wals for queueId=" + queueId
-  + " and serverName=" + serverName, e);
-  throw e;
-}
-return result;
-  }
-
-  @Override
-  public List getAllQueues(String serverName) throws KeeperException {
-String znode = ZKUtil.joinZNode(this.queuesZNode, serverName);
-List result = null;
-try {
-  result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);
-} catch (KeeperException e) {
-  this.abortable.abort("Failed to get list of queues for serverName=" + 
serverName, e);
-  throw e;
-}
-return result;
-  }
-
-  @Override
-  public Set getAllWALs() throws KeeperException {
-/**
- * Load all wals in all replication queues from ZK. This method guarantees 
to return a
- * snapshot which contains all WALs in the zookeeper at the start of this 
call even there
- * is concurrent queue failover. However, some newly created WALs during 
the call may
- * not be included.
- */
-for (int retry = 0; ; retry++) {
-  int v0 = getQueuesZNodeCversion();
-  List rss = getListOfReplicators();
-  if (rss == null || rss.isEmpty()) {
-LOG.debug("Didn't find any region server that replicates, won't 
prevent any deletions.");
-return ImmutableSet.of();
-  }
-  Set wals = Sets.newHashSet();
-  for (String rs : rss) {
-List listOfPeers = getAllQueues(rs);
-// if rs just died, this will be null
-if (listOfPeers == null) {
-  continue;
-}
-for (String id : listOfPeers) {
-  List 

[5/8] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 18b1114..28d2a24 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
@@ -466,10 +467,20 @@ public class HBaseTestingUtility extends 
HBaseCommonTestingUtility {
 
   /**
* @return META table descriptor
+   * @deprecated since 2.0 version and will be removed in 3.0 version.
+   * use {@link #getMetaDescriptor()}
*/
+  @Deprecated
   public HTableDescriptor getMetaTableDescriptor() {
+return new 
ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
+  }
+
+  /**
+   * @return META table descriptor
+   */
+  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
 try {
-  return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
+  return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
 } catch (IOException e) {
   throw new RuntimeException("Unable to create META table descriptor", e);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index 7457f43..95997f2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -25,10 +25,13 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.junit.*;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
@@ -46,9 +49,9 @@ public class TestFSTableDescriptorForceCreation {
 FileSystem fs = FileSystem.get(UTIL.getConfiguration());
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
 
-assertTrue("Should create new table descriptor", 
fstd.createTableDescriptor(htd, false));
+assertTrue("Should create new table descriptor",
+  
fstd.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(),
 false));
   }
 
   @Test
@@ -59,7 +62,7 @@ public class TestFSTableDescriptorForceCreation {
 // Cleanup old tests if any detritus laying around.
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+TableDescriptor htd = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
 fstd.add(htd);
 assertFalse("Should not create new table descriptor", 
fstd.createTableDescriptor(htd, false));
   }
@@ -71,7 +74,7 @@ public class TestFSTableDescriptorForceCreation {
 FileSystem fs = FileSystem.get(UTIL.getConfiguration());
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+TableDescriptor htd = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
 fstd.createTableDescriptor(htd, false);
 assertTrue("Should create new table descriptor",
 fstd.createTableDescriptor(htd, true));


[8/8] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
HBASE-18503 Change ***Util and Master to use TableDescriptor and 
ColumnFamilyDescriptor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25ff9d0b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25ff9d0b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25ff9d0b

Branch: refs/heads/master
Commit: 25ff9d0bbf36a68cdac99035c8d5ab1eb889ceb9
Parents: 12f2b02
Author: Chia-Ping Tsai 
Authored: Thu Aug 24 13:03:38 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Aug 24 13:03:38 2017 +0800

--
 .../hadoop/hbase/backup/util/BackupUtils.java   |   4 +-
 .../hadoop/hbase/backup/util/RestoreTool.java   |  48 ++--
 .../apache/hadoop/hbase/HColumnDescriptor.java  |  11 +-
 .../apache/hadoop/hbase/HTableDescriptor.java   |  11 +-
 .../client/ColumnFamilyDescriptorBuilder.java   |  13 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  41 +--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |   2 +-
 .../hbase/client/TableDescriptorBuilder.java|  20 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java | 101 +---
 .../hbase/shaded/protobuf/RequestConverter.java |  18 +-
 .../apache/hadoop/hbase/TableDescriptors.java   |  15 +-
 .../hbase/client/ClientSideRegionScanner.java   |   3 +-
 .../hbase/client/TableSnapshotScanner.java  |   3 +-
 .../mapreduce/TableSnapshotInputFormatImpl.java |  18 +-
 .../hadoop/hbase/master/CatalogJanitor.java |  13 +-
 .../master/ExpiredMobFileCleanerChore.java  |  10 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  86 +++
 .../hadoop/hbase/master/MasterFileSystem.java   |  24 +-
 .../hbase/master/MasterMobCompactionThread.java |  10 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  25 +-
 .../hadoop/hbase/master/MasterServices.java |  15 +-
 .../hadoop/hbase/master/MobCompactionChore.java |  10 +-
 .../hadoop/hbase/master/TableStateManager.java  |   6 +-
 .../assignment/MergeTableRegionsProcedure.java  |  10 +-
 .../master/assignment/RegionStateStore.java |  12 +-
 .../master/balancer/RegionLocationFinder.java   |  12 +-
 .../master/cleaner/ReplicationMetaCleaner.java  |  10 +-
 .../procedure/AddColumnFamilyProcedure.java |  50 ++--
 .../procedure/CloneSnapshotProcedure.java   |  51 ++--
 .../master/procedure/CreateTableProcedure.java  |  66 ++---
 .../procedure/DeleteColumnFamilyProcedure.java  |  37 +--
 .../procedure/ModifyColumnFamilyProcedure.java  |  43 ++--
 .../master/procedure/ModifyTableProcedure.java  |  75 +++---
 .../procedure/RestoreSnapshotProcedure.java |  34 +--
 .../procedure/TruncateTableProcedure.java   |  22 +-
 .../master/snapshot/MasterSnapshotVerifier.java |   8 +-
 .../hbase/master/snapshot/SnapshotManager.java  |  29 ++-
 .../master/snapshot/TakeSnapshotHandler.java|  10 +-
 .../hadoop/hbase/mob/ExpiredMobFileCleaner.java |  10 +-
 .../org/apache/hadoop/hbase/mob/MobUtils.java   |  27 +-
 .../hbase/mob/compactions/MobCompactor.java |   6 +-
 .../compactions/PartitionedMobCompactor.java|   4 +-
 .../hbase/regionserver/CompactionTool.java  |  16 +-
 .../hbase/regionserver/HRegionFileSystem.java   |   8 +-
 .../hbase/regionserver/HRegionServer.java   |   8 +-
 .../hbase/regionserver/RSRpcServices.java   |   8 +-
 .../regionserver/handler/OpenMetaHandler.java   |   6 +-
 .../handler/OpenPriorityRegionHandler.java  |   5 +-
 .../regionserver/handler/OpenRegionHandler.java |  10 +-
 .../RegionReplicaReplicationEndpoint.java   |   4 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |  14 +-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |  18 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   | 258 ++-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  68 ++---
 .../hadoop/hbase/util/HBaseFsckRepair.java  |   4 +-
 .../hadoop/hbase/util/ModifyRegionUtils.java|  33 ++-
 .../hadoop/hbase/HBaseTestingUtility.java   |  13 +-
 .../TestFSTableDescriptorForceCreation.java |  13 +-
 .../TestHColumnDescriptorDefaultVersions.java   |  12 +-
 .../hbase/client/TestAsyncTableAdminApi.java|   5 +-
 .../hbase/master/MockNoopMasterServices.java|  13 +-
 .../master/assignment/MockMasterServices.java   |  24 +-
 .../MasterProcedureTestingUtility.java  |  40 +--
 .../procedure/TestCreateTableProcedure.java |  33 ++-
 .../TestMasterFailoverWithProcedures.java   |   4 +-
 .../procedure/TestMasterProcedureWalLease.java  |   4 +-
 ...stTableDescriptorModificationFromClient.java |   7 +-
 .../TestPartitionedMobCompactor.java|   3 +-
 .../regionserver/TestGetClosestAtOrBefore.java  |   6 +-
 .../TestRegionMergeTransactionOnCluster.java|  14 +-
 .../regionserver/TestRegionServerNoMaster.java  |   4 +-
 .../hbase/security/access/SecureTestUtil.java   |  21 +-
 .../hbase/snapshot/MobSnapshotTestingUtils.java |  65 ++---
 

[2/8] hbase git commit: Revert "HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor" Wrong author information This reverts commit b03348630c145aa6cc29f0f295442c6deb

2017-08-23 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 979a351..fce4eaa 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -33,9 +33,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@@ -89,7 +89,7 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
   protected final SnapshotManifest snapshotManifest;
   protected final SnapshotManager snapshotManager;
 
-  protected TableDescriptor htd;
+  protected HTableDescriptor htd;
 
   /**
* @param snapshot descriptor of the snapshot to take
@@ -124,12 +124,12 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
   "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
   }
 
-  private TableDescriptor loadTableDescriptor()
+  private HTableDescriptor loadTableDescriptor()
   throws FileNotFoundException, IOException {
-TableDescriptor htd =
+HTableDescriptor htd =
   this.master.getTableDescriptors().get(snapshotTable);
 if (htd == null) {
-  throw new IOException("TableDescriptor missing for " + snapshotTable);
+  throw new IOException("HTableDescriptor missing for " + snapshotTable);
 }
 return htd;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
index b1d1415..d4a54bb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
@@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -59,7 +59,7 @@ public class ExpiredMobFileCleaner extends Configured 
implements Tool {
* @param tableName The current table name.
* @param family The current family.
*/
-  public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor 
family) throws IOException {
+  public void cleanExpiredMobFiles(String tableName, HColumnDescriptor family) 
throws IOException {
 Configuration conf = getConf();
 TableName tn = TableName.valueOf(tableName);
 FileSystem fs = FileSystem.get(conf);
@@ -98,8 +98,8 @@ public class ExpiredMobFileCleaner extends Configured 
implements Tool {
 Connection connection = ConnectionFactory.createConnection(getConf());
 Admin admin = connection.getAdmin();
 try {
-  TableDescriptor htd = admin.listTableDescriptor(tn);
-  ColumnFamilyDescriptor family = 
htd.getColumnFamily(Bytes.toBytes(familyName));
+  HTableDescriptor htd = admin.getTableDescriptor(tn);
+  HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
   if (family == null || !family.isMobEnabled()) {
 throw new IOException("Column family " + familyName + " is not a MOB 
column family");
   }


[4/8] hbase git commit: Revert "HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor" Wrong author information This reverts commit b03348630c145aa6cc29f0f295442c6deb

2017-08-23 Thread chia7712
Revert "HBASE-18503 Change ***Util and Master to use TableDescriptor and 
ColumnFamilyDescriptor"
Wrong author information
This reverts commit b03348630c145aa6cc29f0f295442c6deb28a28e.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12f2b02a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12f2b02a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12f2b02a

Branch: refs/heads/master
Commit: 12f2b02a805817bda2800bf1017c6b1c58dba866
Parents: b033486
Author: Chia-Ping Tsai 
Authored: Thu Aug 24 13:02:11 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Aug 24 13:02:11 2017 +0800

--
 .../hadoop/hbase/backup/util/BackupUtils.java   |   4 +-
 .../hadoop/hbase/backup/util/RestoreTool.java   |  48 ++--
 .../apache/hadoop/hbase/HColumnDescriptor.java  |  11 +-
 .../apache/hadoop/hbase/HTableDescriptor.java   |  11 +-
 .../client/ColumnFamilyDescriptorBuilder.java   |  13 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  41 ++-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |   2 +-
 .../hbase/client/TableDescriptorBuilder.java|  20 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java | 101 +++-
 .../hbase/shaded/protobuf/RequestConverter.java |  18 +-
 .../apache/hadoop/hbase/TableDescriptors.java   |  15 +-
 .../hbase/client/ClientSideRegionScanner.java   |   3 +-
 .../hbase/client/TableSnapshotScanner.java  |   3 +-
 .../mapreduce/TableSnapshotInputFormatImpl.java |  18 +-
 .../hadoop/hbase/master/CatalogJanitor.java |  13 +-
 .../master/ExpiredMobFileCleanerChore.java  |  10 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  86 ---
 .../hadoop/hbase/master/MasterFileSystem.java   |  24 +-
 .../hbase/master/MasterMobCompactionThread.java |  10 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  25 +-
 .../hadoop/hbase/master/MasterServices.java |  15 +-
 .../hadoop/hbase/master/MobCompactionChore.java |  10 +-
 .../hadoop/hbase/master/TableStateManager.java  |   6 +-
 .../assignment/MergeTableRegionsProcedure.java  |  10 +-
 .../master/assignment/RegionStateStore.java |  12 +-
 .../master/balancer/RegionLocationFinder.java   |  12 +-
 .../master/cleaner/ReplicationMetaCleaner.java  |  10 +-
 .../procedure/AddColumnFamilyProcedure.java |  50 ++--
 .../procedure/CloneSnapshotProcedure.java   |  51 ++--
 .../master/procedure/CreateTableProcedure.java  |  66 ++---
 .../procedure/DeleteColumnFamilyProcedure.java  |  37 ++-
 .../procedure/ModifyColumnFamilyProcedure.java  |  43 ++--
 .../master/procedure/ModifyTableProcedure.java  |  75 +++---
 .../procedure/RestoreSnapshotProcedure.java |  34 +--
 .../procedure/TruncateTableProcedure.java   |  22 +-
 .../master/snapshot/MasterSnapshotVerifier.java |   8 +-
 .../hbase/master/snapshot/SnapshotManager.java  |  29 +--
 .../master/snapshot/TakeSnapshotHandler.java|  10 +-
 .../hadoop/hbase/mob/ExpiredMobFileCleaner.java |  10 +-
 .../org/apache/hadoop/hbase/mob/MobUtils.java   |  27 +-
 .../hbase/mob/compactions/MobCompactor.java |   6 +-
 .../compactions/PartitionedMobCompactor.java|   4 +-
 .../hbase/regionserver/CompactionTool.java  |  16 +-
 .../hbase/regionserver/HRegionFileSystem.java   |   8 +-
 .../hbase/regionserver/HRegionServer.java   |   8 +-
 .../hbase/regionserver/RSRpcServices.java   |   8 +-
 .../regionserver/handler/OpenMetaHandler.java   |   6 +-
 .../handler/OpenPriorityRegionHandler.java  |   5 +-
 .../regionserver/handler/OpenRegionHandler.java |  10 +-
 .../RegionReplicaReplicationEndpoint.java   |   4 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |  14 +-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |  18 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   | 258 +--
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  68 +++--
 .../hadoop/hbase/util/HBaseFsckRepair.java  |   4 +-
 .../hadoop/hbase/util/ModifyRegionUtils.java|  33 +--
 .../hadoop/hbase/HBaseTestingUtility.java   |  13 +-
 .../TestFSTableDescriptorForceCreation.java |  13 +-
 .../TestHColumnDescriptorDefaultVersions.java   |  12 +-
 .../hbase/client/TestAsyncTableAdminApi.java|   5 +-
 .../hbase/master/MockNoopMasterServices.java|  13 +-
 .../master/assignment/MockMasterServices.java   |  24 +-
 .../MasterProcedureTestingUtility.java  |  40 ++-
 .../procedure/TestCreateTableProcedure.java |  33 +--
 .../TestMasterFailoverWithProcedures.java   |   4 +-
 .../procedure/TestMasterProcedureWalLease.java  |   4 +-
 ...stTableDescriptorModificationFromClient.java |   7 +-
 .../TestPartitionedMobCompactor.java|   3 +-
 .../regionserver/TestGetClosestAtOrBefore.java  |   6 +-
 .../TestRegionMergeTransactionOnCluster.java|  14 +-
 .../regionserver/TestRegionServerNoMaster.java  |   4 +-
 

[6/8] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index fce4eaa..979a351 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -33,9 +33,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@@ -89,7 +89,7 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
   protected final SnapshotManifest snapshotManifest;
   protected final SnapshotManager snapshotManager;
 
-  protected HTableDescriptor htd;
+  protected TableDescriptor htd;
 
   /**
* @param snapshot descriptor of the snapshot to take
@@ -124,12 +124,12 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
   "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
   }
 
-  private HTableDescriptor loadTableDescriptor()
+  private TableDescriptor loadTableDescriptor()
   throws FileNotFoundException, IOException {
-HTableDescriptor htd =
+TableDescriptor htd =
   this.master.getTableDescriptors().get(snapshotTable);
 if (htd == null) {
-  throw new IOException("HTableDescriptor missing for " + snapshotTable);
+  throw new IOException("TableDescriptor missing for " + snapshotTable);
 }
 return htd;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
index d4a54bb..b1d1415 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
@@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -59,7 +59,7 @@ public class ExpiredMobFileCleaner extends Configured 
implements Tool {
* @param tableName The current table name.
* @param family The current family.
*/
-  public void cleanExpiredMobFiles(String tableName, HColumnDescriptor family) 
throws IOException {
+  public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor 
family) throws IOException {
 Configuration conf = getConf();
 TableName tn = TableName.valueOf(tableName);
 FileSystem fs = FileSystem.get(conf);
@@ -98,8 +98,8 @@ public class ExpiredMobFileCleaner extends Configured 
implements Tool {
 Connection connection = ConnectionFactory.createConnection(getConf());
 Admin admin = connection.getAdmin();
 try {
-  HTableDescriptor htd = admin.getTableDescriptor(tn);
-  HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
+  TableDescriptor htd = admin.listTableDescriptor(tn);
+  ColumnFamilyDescriptor family = 
htd.getColumnFamily(Bytes.toBytes(familyName));
   if (family == null || !family.isMobEnabled()) {
 throw new IOException("Column family " + familyName + " is not a MOB 
column family");
   }


[3/8] hbase git commit: Revert "HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor" Wrong author information This reverts commit b03348630c145aa6cc29f0f295442c6deb

2017-08-23 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
index 476c65c..42a5445 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
@@ -24,11 +24,11 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
@@ -55,8 +55,8 @@ public class MobCompactionChore extends ScheduledChore {
   protected void chore() {
 try {
   TableDescriptors htds = master.getTableDescriptors();
-  Map map = htds.getAll();
-  for (TableDescriptor htd : map.values()) {
+  Map map = htds.getAll();
+  for (HTableDescriptor htd : map.values()) {
 if (!master.getTableStateManager().isTableState(htd.getTableName(),
   TableState.State.ENABLED)) {
   continue;
@@ -66,7 +66,7 @@ public class MobCompactionChore extends ScheduledChore {
   final LockManager.MasterLock lock = 
master.getLockManager().createMasterLock(
   MobUtils.getTableLockName(htd.getTableName()), 
LockProcedure.LockType.EXCLUSIVE,
   this.getClass().getName() + ": mob compaction");
-  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+  for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
 if (!hcd.isMobEnabled()) {
   continue;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index fb83971..18f6856 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -24,12 +24,12 @@ import java.util.Set;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import edu.umd.cs.findbugs.annotations.NonNull;
 import edu.umd.cs.findbugs.annotations.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
@@ -198,7 +198,7 @@ public class TableStateManager {
 
   public static void fixTableStates(TableDescriptors tableDescriptors, 
Connection connection)
   throws IOException {
-final Map allDescriptors =
+final Map allDescriptors =
 tableDescriptors.getAllDescriptors();
 final Map states = new HashMap<>();
 MetaTableAccessor.fullScanTables(connection, new 
MetaTableAccessor.Visitor() {
@@ -210,7 +210,7 @@ public class TableStateManager {
 return true;
   }
 });
-for (Map.Entry entry : allDescriptors.entrySet()) 
{
+for (Map.Entry entry : 
allDescriptors.entrySet()) {
   String table = entry.getKey();
   if (table.equals(TableName.META_TABLE_NAME.getNameAsString()))
 continue;

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index c398c9a..9aaf297 100644
--- 

[1/8] hbase git commit: Revert "HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor" Wrong author information This reverts commit b03348630c145aa6cc29f0f295442c6deb

2017-08-23 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master b03348630 -> 25ff9d0bb


http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 28d2a24..18b1114 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
@@ -467,20 +466,10 @@ public class HBaseTestingUtility extends 
HBaseCommonTestingUtility {
 
   /**
* @return META table descriptor
-   * @deprecated since 2.0 version and will be removed in 3.0 version.
-   * use {@link #getMetaDescriptor()}
*/
-  @Deprecated
   public HTableDescriptor getMetaTableDescriptor() {
-return new 
ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
-  }
-
-  /**
-   * @return META table descriptor
-   */
-  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
 try {
-  return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
+  return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
 } catch (IOException e) {
   throw new RuntimeException("Unable to create META table descriptor", e);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/12f2b02a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index 95997f2..7457f43 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -25,13 +25,10 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.*;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
@@ -49,9 +46,9 @@ public class TestFSTableDescriptorForceCreation {
 FileSystem fs = FileSystem.get(UTIL.getConfiguration());
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
+HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
 
-assertTrue("Should create new table descriptor",
-  
fstd.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(),
 false));
+assertTrue("Should create new table descriptor", 
fstd.createTableDescriptor(htd, false));
   }
 
   @Test
@@ -62,7 +59,7 @@ public class TestFSTableDescriptorForceCreation {
 // Cleanup old tests if any detritus laying around.
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-TableDescriptor htd = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
 fstd.add(htd);
 assertFalse("Should not create new table descriptor", 
fstd.createTableDescriptor(htd, false));
   }
@@ -74,7 +71,7 @@ public class TestFSTableDescriptorForceCreation {
 FileSystem fs = FileSystem.get(UTIL.getConfiguration());
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-TableDescriptor htd = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
+HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
 fstd.createTableDescriptor(htd, false);
 assertTrue("Should create new table descriptor",
 fstd.createTableDescriptor(htd, true));


[7/8] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
index 42a5445..476c65c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
@@ -24,11 +24,11 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
@@ -55,8 +55,8 @@ public class MobCompactionChore extends ScheduledChore {
   protected void chore() {
 try {
   TableDescriptors htds = master.getTableDescriptors();
-  Map map = htds.getAll();
-  for (HTableDescriptor htd : map.values()) {
+  Map map = htds.getAll();
+  for (TableDescriptor htd : map.values()) {
 if (!master.getTableStateManager().isTableState(htd.getTableName(),
   TableState.State.ENABLED)) {
   continue;
@@ -66,7 +66,7 @@ public class MobCompactionChore extends ScheduledChore {
   final LockManager.MasterLock lock = 
master.getLockManager().createMasterLock(
   MobUtils.getTableLockName(htd.getTableName()), 
LockProcedure.LockType.EXCLUSIVE,
   this.getClass().getName() + ": mob compaction");
-  for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
 if (!hcd.isMobEnabled()) {
   continue;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index 18f6856..fb83971 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -24,12 +24,12 @@ import java.util.Set;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import edu.umd.cs.findbugs.annotations.NonNull;
 import edu.umd.cs.findbugs.annotations.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
@@ -198,7 +198,7 @@ public class TableStateManager {
 
   public static void fixTableStates(TableDescriptors tableDescriptors, 
Connection connection)
   throws IOException {
-final Map allDescriptors =
+final Map allDescriptors =
 tableDescriptors.getAllDescriptors();
 final Map states = new HashMap<>();
 MetaTableAccessor.fullScanTables(connection, new 
MetaTableAccessor.Visitor() {
@@ -210,7 +210,7 @@ public class TableStateManager {
 return true;
   }
 });
-for (Map.Entry entry : 
allDescriptors.entrySet()) {
+for (Map.Entry entry : allDescriptors.entrySet()) 
{
   String table = entry.getKey();
   if (table.equals(TableName.META_TABLE_NAME.getNameAsString()))
 continue;

http://git-wip-us.apache.org/repos/asf/hbase/blob/25ff9d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 9aaf297..c398c9a 100644
--- 

[1/4] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b24e33312 -> 205016ca7


http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 18b1114..28d2a24 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
@@ -466,10 +467,20 @@ public class HBaseTestingUtility extends 
HBaseCommonTestingUtility {
 
   /**
* @return META table descriptor
+   * @deprecated since 2.0 version and will be removed in 3.0 version.
+   * use {@link #getMetaDescriptor()}
*/
+  @Deprecated
   public HTableDescriptor getMetaTableDescriptor() {
+return new 
ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
+  }
+
+  /**
+   * @return META table descriptor
+   */
+  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
 try {
-  return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
+  return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
 } catch (IOException e) {
   throw new RuntimeException("Unable to create META table descriptor", e);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index 7457f43..95997f2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -25,10 +25,13 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.junit.*;
+import org.junit.Rule;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
@@ -46,9 +49,9 @@ public class TestFSTableDescriptorForceCreation {
 FileSystem fs = FileSystem.get(UTIL.getConfiguration());
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
 
-assertTrue("Should create new table descriptor", 
fstd.createTableDescriptor(htd, false));
+assertTrue("Should create new table descriptor",
+  
fstd.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(),
 false));
   }
 
   @Test
@@ -59,7 +62,7 @@ public class TestFSTableDescriptorForceCreation {
 // Cleanup old tests if any detritus laying around.
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+TableDescriptor htd = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
 fstd.add(htd);
 assertFalse("Should not create new table descriptor", 
fstd.createTableDescriptor(htd, false));
   }
@@ -71,7 +74,7 @@ public class TestFSTableDescriptorForceCreation {
 FileSystem fs = FileSystem.get(UTIL.getConfiguration());
 Path rootdir = new Path(UTIL.getDataTestDir(), name);
 FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), 
fs, rootdir);
-HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+TableDescriptor htd = 
TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
 fstd.createTableDescriptor(htd, false);
 assertTrue("Should create new table descriptor",
 fstd.createTableDescriptor(htd, true));


[3/4] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
index 42a5445..476c65c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
@@ -24,11 +24,11 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
@@ -55,8 +55,8 @@ public class MobCompactionChore extends ScheduledChore {
   protected void chore() {
 try {
   TableDescriptors htds = master.getTableDescriptors();
-  Map map = htds.getAll();
-  for (HTableDescriptor htd : map.values()) {
+  Map map = htds.getAll();
+  for (TableDescriptor htd : map.values()) {
 if (!master.getTableStateManager().isTableState(htd.getTableName(),
   TableState.State.ENABLED)) {
   continue;
@@ -66,7 +66,7 @@ public class MobCompactionChore extends ScheduledChore {
   final LockManager.MasterLock lock = 
master.getLockManager().createMasterLock(
   MobUtils.getTableLockName(htd.getTableName()), 
LockProcedure.LockType.EXCLUSIVE,
   this.getClass().getName() + ": mob compaction");
-  for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
+  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
 if (!hcd.isMobEnabled()) {
   continue;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index 18f6856..fb83971 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -24,12 +24,12 @@ import java.util.Set;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import edu.umd.cs.findbugs.annotations.NonNull;
 import edu.umd.cs.findbugs.annotations.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
@@ -198,7 +198,7 @@ public class TableStateManager {
 
   public static void fixTableStates(TableDescriptors tableDescriptors, 
Connection connection)
   throws IOException {
-final Map allDescriptors =
+final Map allDescriptors =
 tableDescriptors.getAllDescriptors();
 final Map states = new HashMap<>();
 MetaTableAccessor.fullScanTables(connection, new 
MetaTableAccessor.Visitor() {
@@ -210,7 +210,7 @@ public class TableStateManager {
 return true;
   }
 });
-for (Map.Entry entry : 
allDescriptors.entrySet()) {
+for (Map.Entry entry : allDescriptors.entrySet()) 
{
   String table = entry.getKey();
   if (table.equals(TableName.META_TABLE_NAME.getNameAsString()))
 continue;

http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 9aaf297..c398c9a 100644
--- 

[4/4] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
HBASE-18503 Change ***Util and Master to use TableDescriptor and 
ColumnFamilyDescriptor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/205016ca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/205016ca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/205016ca

Branch: refs/heads/branch-2
Commit: 205016ca79614c1f6a93ab7c6a2ca27b8448b972
Parents: b24e333
Author: Chia-Ping Tsai 
Authored: Thu Aug 24 13:01:09 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Aug 24 13:01:09 2017 +0800

--
 .../hadoop/hbase/backup/util/BackupUtils.java   |   4 +-
 .../hadoop/hbase/backup/util/RestoreTool.java   |  48 ++--
 .../apache/hadoop/hbase/HColumnDescriptor.java  |  11 +-
 .../apache/hadoop/hbase/HTableDescriptor.java   |  11 +-
 .../client/ColumnFamilyDescriptorBuilder.java   |  13 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  41 +--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |   2 +-
 .../hbase/client/TableDescriptorBuilder.java|  20 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java | 101 +---
 .../hbase/shaded/protobuf/RequestConverter.java |  18 +-
 .../apache/hadoop/hbase/TableDescriptors.java   |  15 +-
 .../hbase/client/ClientSideRegionScanner.java   |   3 +-
 .../hbase/client/TableSnapshotScanner.java  |   3 +-
 .../mapreduce/TableSnapshotInputFormatImpl.java |  18 +-
 .../hadoop/hbase/master/CatalogJanitor.java |  13 +-
 .../master/ExpiredMobFileCleanerChore.java  |  10 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  86 +++
 .../hadoop/hbase/master/MasterFileSystem.java   |  24 +-
 .../hbase/master/MasterMobCompactionThread.java |  10 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  25 +-
 .../hadoop/hbase/master/MasterServices.java |  15 +-
 .../hadoop/hbase/master/MobCompactionChore.java |  10 +-
 .../hadoop/hbase/master/TableStateManager.java  |   6 +-
 .../assignment/MergeTableRegionsProcedure.java  |  10 +-
 .../master/assignment/RegionStateStore.java |  12 +-
 .../master/balancer/RegionLocationFinder.java   |  12 +-
 .../master/cleaner/ReplicationMetaCleaner.java  |  10 +-
 .../procedure/AddColumnFamilyProcedure.java |  50 ++--
 .../procedure/CloneSnapshotProcedure.java   |  51 ++--
 .../master/procedure/CreateTableProcedure.java  |  66 ++---
 .../procedure/DeleteColumnFamilyProcedure.java  |  37 +--
 .../procedure/ModifyColumnFamilyProcedure.java  |  43 ++--
 .../master/procedure/ModifyTableProcedure.java  |  75 +++---
 .../procedure/RestoreSnapshotProcedure.java |  34 +--
 .../procedure/TruncateTableProcedure.java   |  22 +-
 .../master/snapshot/MasterSnapshotVerifier.java |   8 +-
 .../hbase/master/snapshot/SnapshotManager.java  |  29 ++-
 .../master/snapshot/TakeSnapshotHandler.java|  10 +-
 .../hadoop/hbase/mob/ExpiredMobFileCleaner.java |  10 +-
 .../org/apache/hadoop/hbase/mob/MobUtils.java   |  27 +-
 .../hbase/mob/compactions/MobCompactor.java |   6 +-
 .../compactions/PartitionedMobCompactor.java|   4 +-
 .../hbase/regionserver/CompactionTool.java  |  16 +-
 .../hbase/regionserver/HRegionFileSystem.java   |   8 +-
 .../hbase/regionserver/HRegionServer.java   |   8 +-
 .../hbase/regionserver/RSRpcServices.java   |   8 +-
 .../regionserver/handler/OpenMetaHandler.java   |   6 +-
 .../handler/OpenPriorityRegionHandler.java  |   5 +-
 .../regionserver/handler/OpenRegionHandler.java |  10 +-
 .../RegionReplicaReplicationEndpoint.java   |   4 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |  14 +-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |  18 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   | 258 ++-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  68 ++---
 .../hadoop/hbase/util/HBaseFsckRepair.java  |   4 +-
 .../hadoop/hbase/util/ModifyRegionUtils.java|  33 ++-
 .../hadoop/hbase/HBaseTestingUtility.java   |  13 +-
 .../TestFSTableDescriptorForceCreation.java |  13 +-
 .../TestHColumnDescriptorDefaultVersions.java   |  12 +-
 .../hbase/client/TestAsyncTableAdminApi.java|   5 +-
 .../hbase/master/MockNoopMasterServices.java|  13 +-
 .../master/assignment/MockMasterServices.java   |  24 +-
 .../MasterProcedureTestingUtility.java  |  40 +--
 .../procedure/TestCreateTableProcedure.java |  33 ++-
 .../TestMasterFailoverWithProcedures.java   |   4 +-
 .../procedure/TestMasterProcedureWalLease.java  |   4 +-
 ...stTableDescriptorModificationFromClient.java |   7 +-
 .../TestPartitionedMobCompactor.java|   3 +-
 .../regionserver/TestGetClosestAtOrBefore.java  |   6 +-
 .../TestRegionMergeTransactionOnCluster.java|  14 +-
 .../regionserver/TestRegionServerNoMaster.java  |   4 +-
 .../hbase/security/access/SecureTestUtil.java   |  21 +-
 .../hbase/snapshot/MobSnapshotTestingUtils.java |  65 ++---
 

[2/4] hbase git commit: HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor

2017-08-23 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index fce4eaa..979a351 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -33,9 +33,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@@ -89,7 +89,7 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
   protected final SnapshotManifest snapshotManifest;
   protected final SnapshotManager snapshotManager;
 
-  protected HTableDescriptor htd;
+  protected TableDescriptor htd;
 
   /**
* @param snapshot descriptor of the snapshot to take
@@ -124,12 +124,12 @@ public abstract class TakeSnapshotHandler extends 
EventHandler implements Snapsh
   "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
   }
 
-  private HTableDescriptor loadTableDescriptor()
+  private TableDescriptor loadTableDescriptor()
   throws FileNotFoundException, IOException {
-HTableDescriptor htd =
+TableDescriptor htd =
   this.master.getTableDescriptors().get(snapshotTable);
 if (htd == null) {
-  throw new IOException("HTableDescriptor missing for " + snapshotTable);
+  throw new IOException("TableDescriptor missing for " + snapshotTable);
 }
 return htd;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/205016ca/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
index d4a54bb..b1d1415 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java
@@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -59,7 +59,7 @@ public class ExpiredMobFileCleaner extends Configured 
implements Tool {
* @param tableName The current table name.
* @param family The current family.
*/
-  public void cleanExpiredMobFiles(String tableName, HColumnDescriptor family) 
throws IOException {
+  public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor 
family) throws IOException {
 Configuration conf = getConf();
 TableName tn = TableName.valueOf(tableName);
 FileSystem fs = FileSystem.get(conf);
@@ -98,8 +98,8 @@ public class ExpiredMobFileCleaner extends Configured 
implements Tool {
 Connection connection = ConnectionFactory.createConnection(getConf());
 Admin admin = connection.getAdmin();
 try {
-  HTableDescriptor htd = admin.getTableDescriptor(tn);
-  HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
+  TableDescriptor htd = admin.listTableDescriptor(tn);
+  ColumnFamilyDescriptor family = 
htd.getColumnFamily(Bytes.toBytes(familyName));
   if (family == null || !family.isMobEnabled()) {
 throw new IOException("Column family " + familyName + " is not a MOB 
column family");
   }


hbase-thirdparty git commit: HBASE-18666 [hbase-thirdparty] Exclude errorprone annotation com.google.errorprone.annotations.CanIgnoreReturnValue

2017-08-23 Thread stack
Repository: hbase-thirdparty
Updated Branches:
  refs/heads/master 68f0e0ee1 -> 0c67e6464


HBASE-18666 [hbase-thirdparty] Exclude errorprone annotation 
com.google.errorprone.annotations.CanIgnoreReturnValue


Project: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/commit/0c67e646
Tree: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/tree/0c67e646
Diff: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/diff/0c67e646

Branch: refs/heads/master
Commit: 0c67e6464c109c769c7001e1a8f9d1b7bb3241af
Parents: 68f0e0e
Author: Michael Stack 
Authored: Wed Aug 23 12:53:07 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 23 12:53:07 2017 -0700

--
 hbase-shaded-miscellaneous/pom.xml | 3 +++
 hbase-shaded-netty/pom.xml | 3 +++
 hbase-shaded-protobuf/pom.xml  | 3 +++
 3 files changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/0c67e646/hbase-shaded-miscellaneous/pom.xml
--
diff --git a/hbase-shaded-miscellaneous/pom.xml 
b/hbase-shaded-miscellaneous/pom.xml
index 564a791..b616593 100644
--- a/hbase-shaded-miscellaneous/pom.xml
+++ b/hbase-shaded-miscellaneous/pom.xml
@@ -72,6 +72,9 @@
 
   com.google
   ${rename.offset}.com.google
+  
+
com.google.errorprone.annotations.CanIgnoreReturnValue
+  
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/0c67e646/hbase-shaded-netty/pom.xml
--
diff --git a/hbase-shaded-netty/pom.xml b/hbase-shaded-netty/pom.xml
index b06d7dd..a8b9c4a 100644
--- a/hbase-shaded-netty/pom.xml
+++ b/hbase-shaded-netty/pom.xml
@@ -71,6 +71,9 @@
 
   io.netty
   ${rename.offset}.io.netty
+  
+
com.google.errorprone.annotations.CanIgnoreReturnValue
+  
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/0c67e646/hbase-shaded-protobuf/pom.xml
--
diff --git a/hbase-shaded-protobuf/pom.xml b/hbase-shaded-protobuf/pom.xml
index 28a7514..f9cf3fe 100644
--- a/hbase-shaded-protobuf/pom.xml
+++ b/hbase-shaded-protobuf/pom.xml
@@ -164,6 +164,9 @@
 
   com.google.protobuf
   
${rename.offset}.com.google.protobuf
+  
+
com.google.errorprone.annotations.CanIgnoreReturnValue
+  
 
   
 



[1/2] hbase-thirdparty git commit: Set version to 1.0.1-SNAPSHOT

2017-08-23 Thread stack
Repository: hbase-thirdparty
Updated Branches:
  refs/heads/master 0c67e6464 -> b37531a5f


Set version to 1.0.1-SNAPSHOT


Project: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/commit/d21580cd
Tree: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/tree/d21580cd
Diff: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/diff/d21580cd

Branch: refs/heads/master
Commit: d21580cd91cc79bc6b27c2d39d6a95af55a7ce60
Parents: 0c67e64
Author: Michael Stack 
Authored: Wed Aug 23 13:49:34 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 23 13:49:34 2017 -0700

--
 hbase-shaded-miscellaneous/pom.xml | 2 +-
 hbase-shaded-netty/pom.xml | 2 +-
 hbase-shaded-protobuf/pom.xml  | 2 +-
 pom.xml| 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/d21580cd/hbase-shaded-miscellaneous/pom.xml
--
diff --git a/hbase-shaded-miscellaneous/pom.xml 
b/hbase-shaded-miscellaneous/pom.xml
index b616593..7407d34 100644
--- a/hbase-shaded-miscellaneous/pom.xml
+++ b/hbase-shaded-miscellaneous/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.1
+1.0.1-SNAPSHOT
 ..
   
   hbase-shaded-miscellaneous

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/d21580cd/hbase-shaded-netty/pom.xml
--
diff --git a/hbase-shaded-netty/pom.xml b/hbase-shaded-netty/pom.xml
index a8b9c4a..8cbe6e3 100644
--- a/hbase-shaded-netty/pom.xml
+++ b/hbase-shaded-netty/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.1
+1.0.1-SNAPSHOT
 ..
   
   hbase-shaded-netty

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/d21580cd/hbase-shaded-protobuf/pom.xml
--
diff --git a/hbase-shaded-protobuf/pom.xml b/hbase-shaded-protobuf/pom.xml
index f9cf3fe..dd9b381 100644
--- a/hbase-shaded-protobuf/pom.xml
+++ b/hbase-shaded-protobuf/pom.xml
@@ -23,7 +23,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.1
+1.0.1-SNAPSHOT
 ..
   
   hbase-shaded-protobuf

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/d21580cd/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 6818561..8505932 100644
--- a/pom.xml
+++ b/pom.xml
@@ -38,7 +38,7 @@
   
   org.apache.hbase.thirdparty
   hbase-thirdparty
-  1.0.1
+  1.0.1-SNAPSHOT
   Apache HBase Third-Party Libs
   pom
   



hbase git commit: HBASE-18532 Improve cache related stats rendered on RS UI

2017-08-23 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 ad22437d0 -> 19a80c823


HBASE-18532 Improve cache related stats rendered on RS UI

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/19a80c82
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/19a80c82
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/19a80c82

Branch: refs/heads/branch-1
Commit: 19a80c8234458ab2974cd5fde90fb25f1f2d0c2f
Parents: ad22437
Author: Biju Nair 
Authored: Wed Aug 23 16:19:31 2017 -0400
Committer: tedyu 
Committed: Wed Aug 23 13:26:10 2017 -0700

--
 .../hbase/io/hfile/MemcachedBlockCache.java | 10 +++
 .../tmpl/regionserver/BlockCacheTmpl.jamon  |  9 +++---
 .../hadoop/hbase/io/hfile/BlockCache.java   | 13 +
 .../hbase/io/hfile/CombinedBlockCache.java  | 10 +++
 .../hadoop/hbase/io/hfile/LruBlockCache.java| 30 +++-
 .../hbase/io/hfile/bucket/BucketCache.java  | 10 +++
 .../regionserver/TestHeapMemoryManager.java | 10 +++
 7 files changed, 86 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/19a80c82/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
--
diff --git 
a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
 
b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
index 54cb8b6..f50a117 100644
--- 
a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
+++ 
b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
@@ -205,11 +205,21 @@ public class MemcachedBlockCache implements BlockCache {
   }
 
   @Override
+  public long getCurrentDataSize() {
+return 0;
+  }
+
+  @Override
   public long getBlockCount() {
 return 0;
   }
 
   @Override
+  public long getDataBlockCount() {
+return 0;
+  }
+
+  @Override
   public Iterator iterator() {
 return new Iterator() {
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/19a80c82/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index daa5d76..3d2606b 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -327,25 +327,25 @@ are combined counts. Request count is sum of hits and 
misses.
 
 
 Count
-<% String.format("%,d", cbsbf.getCount()) %>
+<% String.format("%,d", bc.getBlockCount()) %>
 Count of Blocks
 
 <%if !bucketCache %>
 
 Count
-<% String.format("%,d", cbsbf.getDataCount()) %>
+<% String.format("%,d", bc.getDataBlockCount()) %>
 Count of DATA Blocks
 
 
 
 Size
-<% TraditionalBinaryPrefix.long2String(cbsbf.getSize(), "B", 1) 
%>
+<% TraditionalBinaryPrefix.long2String(bc.getCurrentSize(), "B", 
1) %>
 Size of Blocks
 
 <%if !bucketCache %>
 
 Size
-<% TraditionalBinaryPrefix.long2String(cbsbf.getDataSize(), "B", 
1) %>
+<% TraditionalBinaryPrefix.long2String(bc.getCurrentDataSize(), 
"B", 1) %>
 Size of DATA Blocks
 
  
@@ -371,4 +371,3 @@ are combined counts. Request count is sum of hits and 
misses.
 cbsbf = null;
 
 
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/19a80c82/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index 57c4be9..35cec26 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -101,6 +101,13 @@ public interface BlockCache extends Iterable {
*/
   long getCurrentSize();
 
+ 
+  /**
+   * Returns the occupied size of data blocks, in bytes.
+   * @return occupied space in cache, in bytes
+   */
+  long getCurrentDataSize();
+
   /**
* Returns the number of blocks currently cached in the block cache.
* @return 

hbase git commit: HBASE-18532 Improve cache related stats rendered on RS UI

2017-08-23 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 3071dc2fd -> 40dedb8df


HBASE-18532 Improve cache related stats rendered on RS UI

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/40dedb8d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/40dedb8d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/40dedb8d

Branch: refs/heads/branch-1.4
Commit: 40dedb8df204b12e602860ea1029fcfb596dd679
Parents: 3071dc2
Author: Biju Nair 
Authored: Wed Aug 23 16:19:31 2017 -0400
Committer: tedyu 
Committed: Wed Aug 23 13:26:32 2017 -0700

--
 .../hbase/io/hfile/MemcachedBlockCache.java | 10 +++
 .../tmpl/regionserver/BlockCacheTmpl.jamon  |  9 +++---
 .../hadoop/hbase/io/hfile/BlockCache.java   | 13 +
 .../hbase/io/hfile/CombinedBlockCache.java  | 10 +++
 .../hadoop/hbase/io/hfile/LruBlockCache.java| 30 +++-
 .../hbase/io/hfile/bucket/BucketCache.java  | 10 +++
 .../regionserver/TestHeapMemoryManager.java | 10 +++
 7 files changed, 86 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/40dedb8d/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
--
diff --git 
a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
 
b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
index 54cb8b6..f50a117 100644
--- 
a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
+++ 
b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
@@ -205,11 +205,21 @@ public class MemcachedBlockCache implements BlockCache {
   }
 
   @Override
+  public long getCurrentDataSize() {
+return 0;
+  }
+
+  @Override
   public long getBlockCount() {
 return 0;
   }
 
   @Override
+  public long getDataBlockCount() {
+return 0;
+  }
+
+  @Override
   public Iterator iterator() {
 return new Iterator() {
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/40dedb8d/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index daa5d76..3d2606b 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -327,25 +327,25 @@ are combined counts. Request count is sum of hits and 
misses.
 
 
 Count
-<% String.format("%,d", cbsbf.getCount()) %>
+<% String.format("%,d", bc.getBlockCount()) %>
 Count of Blocks
 
 <%if !bucketCache %>
 
 Count
-<% String.format("%,d", cbsbf.getDataCount()) %>
+<% String.format("%,d", bc.getDataBlockCount()) %>
 Count of DATA Blocks
 
 
 
 Size
-<% TraditionalBinaryPrefix.long2String(cbsbf.getSize(), "B", 1) 
%>
+<% TraditionalBinaryPrefix.long2String(bc.getCurrentSize(), "B", 
1) %>
 Size of Blocks
 
 <%if !bucketCache %>
 
 Size
-<% TraditionalBinaryPrefix.long2String(cbsbf.getDataSize(), "B", 
1) %>
+<% TraditionalBinaryPrefix.long2String(bc.getCurrentDataSize(), 
"B", 1) %>
 Size of DATA Blocks
 
  
@@ -371,4 +371,3 @@ are combined counts. Request count is sum of hits and 
misses.
 cbsbf = null;
 
 
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/40dedb8d/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index 57c4be9..35cec26 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -101,6 +101,13 @@ public interface BlockCache extends Iterable {
*/
   long getCurrentSize();
 
+ 
+  /**
+   * Returns the occupied size of data blocks, in bytes.
+   * @return occupied space in cache, in bytes
+   */
+  long getCurrentDataSize();
+
   /**
* Returns the number of blocks currently cached in the block cache.
* @return 

[2/2] hbase-thirdparty git commit: Revert "Set version to 1.0.1-SNAPSHOT" Set it back to 1.0.1 so can push a RC

2017-08-23 Thread stack
Revert "Set version to 1.0.1-SNAPSHOT"
Set it back to 1.0.1 so can push a RC

This reverts commit d21580cd91cc79bc6b27c2d39d6a95af55a7ce60.


Project: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/commit/b37531a5
Tree: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/tree/b37531a5
Diff: http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/diff/b37531a5

Branch: refs/heads/master
Commit: b37531a5f514285a30fee0de7cb1a995a7603ea0
Parents: d21580c
Author: Michael Stack 
Authored: Wed Aug 23 13:51:09 2017 -0700
Committer: Michael Stack 
Committed: Wed Aug 23 13:51:09 2017 -0700

--
 hbase-shaded-miscellaneous/pom.xml | 2 +-
 hbase-shaded-netty/pom.xml | 2 +-
 hbase-shaded-protobuf/pom.xml  | 2 +-
 pom.xml| 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/b37531a5/hbase-shaded-miscellaneous/pom.xml
--
diff --git a/hbase-shaded-miscellaneous/pom.xml 
b/hbase-shaded-miscellaneous/pom.xml
index 7407d34..b616593 100644
--- a/hbase-shaded-miscellaneous/pom.xml
+++ b/hbase-shaded-miscellaneous/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.1-SNAPSHOT
+1.0.1
 ..
   
   hbase-shaded-miscellaneous

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/b37531a5/hbase-shaded-netty/pom.xml
--
diff --git a/hbase-shaded-netty/pom.xml b/hbase-shaded-netty/pom.xml
index 8cbe6e3..a8b9c4a 100644
--- a/hbase-shaded-netty/pom.xml
+++ b/hbase-shaded-netty/pom.xml
@@ -32,7 +32,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.1-SNAPSHOT
+1.0.1
 ..
   
   hbase-shaded-netty

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/b37531a5/hbase-shaded-protobuf/pom.xml
--
diff --git a/hbase-shaded-protobuf/pom.xml b/hbase-shaded-protobuf/pom.xml
index dd9b381..f9cf3fe 100644
--- a/hbase-shaded-protobuf/pom.xml
+++ b/hbase-shaded-protobuf/pom.xml
@@ -23,7 +23,7 @@
   
 org.apache.hbase.thirdparty
 hbase-thirdparty
-1.0.1-SNAPSHOT
+1.0.1
 ..
   
   hbase-shaded-protobuf

http://git-wip-us.apache.org/repos/asf/hbase-thirdparty/blob/b37531a5/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 8505932..6818561 100644
--- a/pom.xml
+++ b/pom.xml
@@ -38,7 +38,7 @@
   
   org.apache.hbase.thirdparty
   hbase-thirdparty
-  1.0.1-SNAPSHOT
+  1.0.1
   Apache HBase Third-Party Libs
   pom
   



[03/51] [partial] hbase-site git commit: Published site at .

2017-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
new file mode 100644
index 000..924bd38
--- /dev/null
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
@@ -0,0 +1,163 @@
+
+
+http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
+  
+
+
+
+
+Apache HBase - Exemplar for hbase-client archetype  
Dependency Information
+
+
+
+
+  
+
+
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/2.3.2/css/bootstrap-responsive.min.css"/>
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/styles/github.min.css"/>
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js";>
+  
+  
+
+  
+
+
+
+
+
+  
+  
+
+  
+  
+  
+
+
+
+
+
+  
+  
+  
+  var cx = '000385458301414556862:sq1bb0xugjg';
+
+  (function() {
+var gcse = document.createElement('script'); gcse.type = 
'text/javascript'; gcse.async = true;
+gcse.src = (document.location.protocol == 'https:' ? 'https:' : 
'http:') + '//cse.google.com/cse.js?cx=' + cx;
+var s = document.getElementsByTagName('script')[0]; 
s.parentNode.insertBefore(gcse, s);
+  })();
+
+
+
+  
+  
+
+   
+  
+  
+
+  
+
+
+
+  
+
+https://www.eventbrite.com/e/hbasecon-asia-2017-tickets-34935546159; 
id="bannerLeft">
+   
 
+
+  
+
+   
 
+
+  
+
+  
+
+  
+
+
+
+
+
+
+  
+
+  
+
+  
+
+
+  
+
+Dependency Information
+
+Apache Maven
+dependency
+  groupIdorg.apache.hbase/groupId
+  artifactIdhbase-client-project/artifactId
+  version3.0.0-SNAPSHOT/version
+/dependency
+
+Apache Buildr
+'org.apache.hbase:hbase-client-project:jar:3.0.0-SNAPSHOT'
+
+Apache Ivy
+dependency 
org=org.apache.hbase name=hbase-client-project 
rev=3.0.0-SNAPSHOT
+  artifact name=hbase-client-project type=jar /
+/dependency
+
+Groovy Grape
+@Grapes(
+@Grab(group='org.apache.hbase', module='hbase-client-project', 
version='3.0.0-SNAPSHOT')
+)
+
+Gradle/Grails
+compile 
'org.apache.hbase:hbase-client-project:3.0.0-SNAPSHOT'
+
+Scala SBT
+libraryDependencies += 
org.apache.hbase % hbase-client-project % 
3.0.0-SNAPSHOT
+
+Leiningen
+[org.apache.hbase/hbase-client-project 
3.0.0-SNAPSHOT]
+  
+  
+
+
+
+
+
+  
+  Copyright 
20072017
+https://www.apache.org/;>The Apache Software 
Foundation.
+All rights reserved.  
+
+  Last Published: 
2017-08-23
+
+
+
+
+  http://maven.apache.org/; title="Built by 
Maven" class="poweredBy">
+
+  
+  
+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
new file mode 100644
index 000..5e28694
--- /dev/null
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
@@ -0,0 +1,758 @@
+
+
+http://www.w3.org/1999/xhtml; 

[29/51] [partial] hbase-site git commit: Published site at .

2017-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-annotations/css/print.css
--
diff --git a/hbase-build-configuration/hbase-annotations/css/print.css 
b/hbase-build-configuration/hbase-annotations/css/print.css
new file mode 100644
index 000..d4df77f
--- /dev/null
+++ b/hbase-build-configuration/hbase-annotations/css/print.css
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* $Id$ */
+
+#banner, #footer, #leftcol, #breadcrumbs, .docs #toc, .docs .courtesylinks, 
#leftColumn, #navColumn {display: none !important;}
+#bodyColumn, body.docs div.docs {margin: 0 !important;border: none !important}

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-annotations/css/site.css
--
diff --git a/hbase-build-configuration/hbase-annotations/css/site.css 
b/hbase-build-configuration/hbase-annotations/css/site.css
new file mode 100644
index 000..055e7e2
--- /dev/null
+++ b/hbase-build-configuration/hbase-annotations/css/site.css
@@ -0,0 +1 @@
+/* You can override this file with your own styles */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-annotations/dependencies.html
--
diff --git a/hbase-build-configuration/hbase-annotations/dependencies.html 
b/hbase-build-configuration/hbase-annotations/dependencies.html
new file mode 100644
index 000..9d273cb
--- /dev/null
+++ b/hbase-build-configuration/hbase-annotations/dependencies.html
@@ -0,0 +1,393 @@
+
+
+http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
+  
+
+
+
+
+Apache HBase - Annotations  Project Dependencies
+
+
+
+
+  
+
+
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/2.3.2/css/bootstrap-responsive.min.css"/>
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/styles/github.min.css"/>
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js";>
+  
+  
+
+  
+
+
+
+
+
+  
+  
+
+  
+  
+  
+
+
+
+
+
+  
+  
+  
+  var cx = '000385458301414556862:sq1bb0xugjg';
+
+  (function() {
+var gcse = document.createElement('script'); gcse.type = 
'text/javascript'; gcse.async = true;
+gcse.src = (document.location.protocol == 'https:' ? 'https:' : 
'http:') + '//cse.google.com/cse.js?cx=' + cx;
+var s = document.getElementsByTagName('script')[0]; 
s.parentNode.insertBefore(gcse, s);
+  })();
+
+
+
+  
+  
+
+   
+  
+  
+
+  
+
+
+
+  
+
+https://www.eventbrite.com/e/hbasecon-asia-2017-tickets-34935546159; 
id="bannerLeft">
+   
 
+
+  
+
+   
 
+
+  
+
+  
+
+  
+
+
+
+
+
+
+  
+
+  
+
+  
+
+
+  
+
+
+Project Dependencies
+
+compile
+The following is a list of 

[34/51] [partial] hbase-site git commit: Published site at .

2017-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/css/print.css
--
diff --git a/hbase-build-configuration/css/print.css 
b/hbase-build-configuration/css/print.css
new file mode 100644
index 000..d4df77f
--- /dev/null
+++ b/hbase-build-configuration/css/print.css
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* $Id$ */
+
+#banner, #footer, #leftcol, #breadcrumbs, .docs #toc, .docs .courtesylinks, 
#leftColumn, #navColumn {display: none !important;}
+#bodyColumn, body.docs div.docs {margin: 0 !important;border: none !important}

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/css/site.css
--
diff --git a/hbase-build-configuration/css/site.css 
b/hbase-build-configuration/css/site.css
new file mode 100644
index 000..055e7e2
--- /dev/null
+++ b/hbase-build-configuration/css/site.css
@@ -0,0 +1 @@
+/* You can override this file with your own styles */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/dependencies.html
--
diff --git a/hbase-build-configuration/dependencies.html 
b/hbase-build-configuration/dependencies.html
new file mode 100644
index 000..4b9af17
--- /dev/null
+++ b/hbase-build-configuration/dependencies.html
@@ -0,0 +1,370 @@
+
+
+http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
+  
+
+
+
+
+Apache HBase - Build Configuration  Project 
Dependencies
+
+
+
+
+  
+
+
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/2.3.2/css/bootstrap-responsive.min.css"/>
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/styles/github.min.css"/>
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js";>
+  
+  
+
+  
+
+
+
+
+
+  
+  
+
+  
+  
+  
+
+
+
+
+
+  
+  
+  
+  var cx = '000385458301414556862:sq1bb0xugjg';
+
+  (function() {
+var gcse = document.createElement('script'); gcse.type = 
'text/javascript'; gcse.async = true;
+gcse.src = (document.location.protocol == 'https:' ? 'https:' : 
'http:') + '//cse.google.com/cse.js?cx=' + cx;
+var s = document.getElementsByTagName('script')[0]; 
s.parentNode.insertBefore(gcse, s);
+  })();
+
+
+
+  
+  
+
+   
+  
+  
+
+  
+
+
+
+  
+
+https://www.eventbrite.com/e/hbasecon-asia-2017-tickets-34935546159; 
id="bannerLeft">
+   
 
+
+  
+
+   
 
+
+  
+
+  
+
+  
+
+
+
+
+
+
+  
+
+  
+
+  
+
+
+  
+
+
+Project Dependencies
+
+compile
+The following is a list of compile dependencies for this project. These 
dependencies are required to compile and run the application:
+
+
+GroupId
+ArtifactId
+Version
+Type
+Licenses
+
+com.github.stephenc.findbugs

[17/51] [partial] hbase-site git commit: Published site at .

2017-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.svg
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.svg
 
b/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.svg
new file mode 100644
index 000..4469488
--- /dev/null
+++ 
b/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.svg
@@ -0,0 +1,229 @@
+
+http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd; >
+http://www.w3.org/2000/svg;>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.ttf
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.ttf
 
b/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.ttf
new file mode 100644
index 000..2824015
Binary files /dev/null and 
b/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.ttf
 differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.woff
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.woff
 
b/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.woff
new file mode 100644
index 000..f824ac1
Binary files /dev/null and 
b/hbase-build-configuration/hbase-archetypes/fonts/glyphicons-halflings-regular.woff
 differ



[24/51] [partial] hbase-site git commit: Published site at .

2017-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-annotations/js/apache-maven-fluido-1.5-HBASE.min.js
--
diff --git 
a/hbase-build-configuration/hbase-annotations/js/apache-maven-fluido-1.5-HBASE.min.js
 
b/hbase-build-configuration/hbase-annotations/js/apache-maven-fluido-1.5-HBASE.min.js
new file mode 100644
index 000..0537c09
--- /dev/null
+++ 
b/hbase-build-configuration/hbase-annotations/js/apache-maven-fluido-1.5-HBASE.min.js
@@ -0,0 +1,25 @@
+/*!
+ * jQuery JavaScript Library v1.11.2
+ * http://jquery.com/
+ *
+ * Includes Sizzle.js
+ * http://sizzlejs.com/
+ *
+ * Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
+ * Released under the MIT license
+ * http://jquery.org/license
+ *
+ * Date: 2014-12-17T15:27Z
+ */
+(function(b,a){if(typeof module==="object"& 
module.exports==="object"){module.exports=b.document?a(b,true):function(c){if(!c.document){throw
 new Error("jQuery requires a window with a document")}return 
a(c)}}else{a(b)}}(typeof window!=="undefined"?window:this,function(a5,av){var 
aP=[];var P=aP.slice;var az=aP.concat;var x=aP.push;var bU=aP.indexOf;var 
ac={};var y=ac.toString;var K=ac.hasOwnProperty;var D={};var 
ai="1.11.2",bI=function(e,i){return new 
bI.fn.init(e,i)},E=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,bS=/^-ms-/,aW=/-([\da-z])/gi,O=function(e,i){return
 
i.toUpperCase()};bI.fn=bI.prototype={jquery:ai,constructor:bI,selector:"",length:0,toArray:function(){return
 P.call(this)},get:function(e){return 
e!=null?(e<0?this[e+this.length]:this[e]):P.call(this)},pushStack:function(e){var
 
i=bI.merge(this.constructor(),e);i.prevObject=this;i.context=this.context;return
 i},each:function(i,e){return bI.each(this,i,e)},map:function(e){return 
this.pushStack(bI.map(this,function(b7,b6){retu
 rn e.call(b7,b6,b7)}))},slice:function(){return 
this.pushStack(P.apply(this,arguments))},first:function(){return 
this.eq(0)},last:function(){return this.eq(-1)},eq:function(b7){var 
e=this.length,b6=+b7+(b7<0?e:0);return 
this.pushStack(b6>=0&=0},isEmptyObject:function(i){var e;for(e 
in i){return false}return true},isPlainObject:function(b7){var 
i;if(!b7||bI.type(b7)!=="object"||b7.nodeType||bI.isWindow(b7)){return 
false}try{if(b7.constructor&&!K.call(b7,"constructor")&&!K.call(b7.constructor.prototype,"isPrototypeOf")){return
 false}}catch(b6){return false}if(D.ownLast){for(i in b7){return 
K.call(b7,i)}}for(i in b7){}return 
i===undefined||K.call(b7,i)},type:function(e){if(e==null){return e+""}return 
typeof e==="object"||typeof e==="function"?ac[y.call(e)]||"object":typeof 
e},globalEval:function(e){if(e&(e)){(a5.execScript||function(i){a5["eval"].call(a5,i)})(e)}},camelCase:function(e){return
 e.replace(bS,"ms-").replace(aW,O)},nodeN
 ame:function(i,e){return 
i.nodeName&()===e.toLowerCase()},each:function(ca,cb,b6){var
 
b9,b7=0,b8=ca.length,e=ad(ca);if(b6){if(e){for(;b7

[06/51] [partial] hbase-site git commit: Published site at .

2017-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-archetypes/hbase-client-project/css/print.css
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/css/print.css 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/css/print.css
new file mode 100644
index 000..d4df77f
--- /dev/null
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/css/print.css
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/* $Id$ */
+
+#banner, #footer, #leftcol, #breadcrumbs, .docs #toc, .docs .courtesylinks, 
#leftColumn, #navColumn {display: none !important;}
+#bodyColumn, body.docs div.docs {margin: 0 !important;border: none !important}

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-archetypes/hbase-client-project/css/site.css
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/css/site.css 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/css/site.css
new file mode 100644
index 000..055e7e2
--- /dev/null
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/css/site.css
@@ -0,0 +1 @@
+/* You can override this file with your own styles */
\ No newline at end of file



[22/51] [partial] hbase-site git commit: Published site at .

2017-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-annotations/team-list.html
--
diff --git a/hbase-build-configuration/hbase-annotations/team-list.html 
b/hbase-build-configuration/hbase-annotations/team-list.html
new file mode 100644
index 000..70384b2
--- /dev/null
+++ b/hbase-build-configuration/hbase-annotations/team-list.html
@@ -0,0 +1,539 @@
+
+
+http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
+  
+
+
+
+
+Apache HBase - Annotations  Project Team
+
+
+
+
+  
+
+
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/2.3.2/css/bootstrap-responsive.min.css"/>
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/styles/github.min.css"/>
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js";>
+  
+  
+
+  
+
+
+
+
+
+  
+  
+
+  
+  
+  
+
+
+
+
+
+  
+  
+  
+  var cx = '000385458301414556862:sq1bb0xugjg';
+
+  (function() {
+var gcse = document.createElement('script'); gcse.type = 
'text/javascript'; gcse.async = true;
+gcse.src = (document.location.protocol == 'https:' ? 'https:' : 
'http:') + '//cse.google.com/cse.js?cx=' + cx;
+var s = document.getElementsByTagName('script')[0]; 
s.parentNode.insertBefore(gcse, s);
+  })();
+
+
+
+  
+  
+
+   
+  
+  
+
+  
+
+
+
+  
+
+https://www.eventbrite.com/e/hbasecon-asia-2017-tickets-34935546159; 
id="bannerLeft">
+   
 
+
+  
+
+   
 
+
+  
+
+  
+
+  
+
+
+
+
+
+
+  
+
+  
+
+  
+
+
+  
+
+Project Team
+A successful project requires many people to play many roles. Some members 
write code or documentation, while others are valuable as testers, submitting 
patches and suggestions.
+The project team is comprised of Members and Contributors. Members have 
direct access to the source of a project and actively evolve the code-base. 
Contributors improve the project through submission of patches and suggestions 
to the Members. The number of Contributors to the project is unbounded. Get 
involved today. All contributions to the project are greatly appreciated.
+
+Members
+The following is a list of developers with commit privileges that have 
directly contributed to the project in one way or another.
+
+
+Image
+Id
+Name
+Email
+Time Zone
+
+http://www.gravatar.com/avatar/de20895d3fbc56885e0c6679e428113d?d=mms=60;
 alt="" />
+achouhan
+Abhishek Singh Chouhan
+mailto:achou...@apache.org;>achou...@apache.org
++5
+
+http://www.gravatar.com/avatar/02e0785fde1d08fb3a7b10f9e3b4458f?d=mms=60;
 alt="" />
+acube123
+Amitanand S. Aiyer
+mailto:acube...@apache.org;>acube...@apache.org
+-8
+
+http://www.gravatar.com/avatar/9061d7d8371077797f4442a3cc3fd735?d=mms=60;
 alt="" />
+allan163
+Allan Yang
+mailto:allan...@apache.org;>allan...@apache.org
++8
+
+http://www.gravatar.com/avatar/46227070ea29a67f9ff1196c6cb894fb?d=mms=60;
 alt="" />
+appy
+Apekshit Sharma
+mailto:a...@apache.org;>a...@apache.org
+-8
+
+http://www.gravatar.com/avatar/e8ed478224ff187b224ded3f1ceffe8a?d=mms=60;
 alt="" />
+anastasia
+Anastasia Braginsky
+mailto:anasta...@apache.org;>anasta...@apache.org
++2
+
+http://www.gravatar.com/avatar/7412937bb878e132fdaaced278cdc199?d=mms=60;
 alt="" />
+apurtell
+Andrew Purtell
+mailto:apurt...@apache.org;>apurt...@apache.org
+-8
+
+http://www.gravatar.com/avatar/fe358a20b87f59013f41e74daff1297a?d=mms=60;
 alt="" />
+anoopsamjohn
+Anoop Sam John
+mailto:anoopsamj...@apache.org;>anoopsamj...@apache.org
++5
+
+http://www.gravatar.com/avatar/9d7aefd347b4278b4dd9e6d082cc5ade?d=mms=60;
 alt="" />
+antonov
+Mikhail Antonov
+mailto:anto...@apache.org;>anto...@apache.org
+-8
+

[25/51] [partial] hbase-site git commit: Published site at .

2017-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-annotations/integration.html
--
diff --git a/hbase-build-configuration/hbase-annotations/integration.html 
b/hbase-build-configuration/hbase-annotations/integration.html
new file mode 100644
index 000..805122d
--- /dev/null
+++ b/hbase-build-configuration/hbase-annotations/integration.html
@@ -0,0 +1,142 @@
+
+
+http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
+  
+
+
+
+
+Apache HBase - Annotations  CI Management
+
+
+
+
+  
+
+
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/2.3.2/css/bootstrap-responsive.min.css"/>
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/styles/github.min.css"/>
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js";>
+  
+  
+
+  
+
+
+
+
+
+  
+  
+
+  
+  
+  
+
+
+
+
+
+  
+  
+  
+  var cx = '000385458301414556862:sq1bb0xugjg';
+
+  (function() {
+var gcse = document.createElement('script'); gcse.type = 
'text/javascript'; gcse.async = true;
+gcse.src = (document.location.protocol == 'https:' ? 'https:' : 
'http:') + '//cse.google.com/cse.js?cx=' + cx;
+var s = document.getElementsByTagName('script')[0]; 
s.parentNode.insertBefore(gcse, s);
+  })();
+
+
+
+  
+  
+
+   
+  
+  
+
+  
+
+
+
+  
+
+https://www.eventbrite.com/e/hbasecon-asia-2017-tickets-34935546159; 
id="bannerLeft">
+   
 
+
+  
+
+   
 
+
+  
+
+  
+
+  
+
+
+
+
+
+
+  
+
+  
+
+  
+
+
+  
+
+Overview
+This project uses http://hudson-ci.org/;>Hudson.
+
+Access
+The following is a link to the continuous integration system used by the 
project:
+http://hudson.zones.apache.org/hudson/view/HBase/job/HBase-TRUNK/;>http://hudson.zones.apache.org/hudson/view/HBase/job/HBase-TRUNK/
+
+Notifiers
+No notifiers are defined. Please check back at a later date.
+  
+  
+
+
+
+
+
+  
+  Copyright 
20072017
+https://www.apache.org/;>The Apache Software 
Foundation.
+All rights reserved.  
+
+  Last Published: 
2017-08-23
+
+
+
+
+  http://maven.apache.org/; title="Built by 
Maven" class="poweredBy">
+
+  
+  
+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-annotations/issue-tracking.html
--
diff --git a/hbase-build-configuration/hbase-annotations/issue-tracking.html 
b/hbase-build-configuration/hbase-annotations/issue-tracking.html
new file mode 100644
index 000..ea79997
--- /dev/null
+++ b/hbase-build-configuration/hbase-annotations/issue-tracking.html
@@ -0,0 +1,139 @@
+
+
+http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
+  
+
+
+
+
+Apache HBase - Annotations  Issue Management
+
+
+
+
+  
+
+
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/2.3.2/css/bootstrap-responsive.min.css"/>
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/styles/github.min.css"/>
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js";>
+  
+  
+
+  
+

[01/51] [partial] hbase-site git commit: Published site at .

2017-08-23 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 979a74085 -> 9943f14f1


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9943f14f/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html
new file mode 100644
index 000..520f53f
--- /dev/null
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html
@@ -0,0 +1,142 @@
+
+
+http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
+  
+
+
+
+
+Apache HBase - Exemplar for hbase-client archetype  CI 
Management
+
+
+
+
+  
+
+
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/2.3.2/css/bootstrap-responsive.min.css"/>
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/styles/github.min.css"/>
+  
+
+
+  
+
+https://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js";>
+  
+  
+
+  
+
+
+
+
+
+  
+  
+
+  
+  
+  
+
+
+
+
+
+  
+  
+  
+  var cx = '000385458301414556862:sq1bb0xugjg';
+
+  (function() {
+var gcse = document.createElement('script'); gcse.type = 
'text/javascript'; gcse.async = true;
+gcse.src = (document.location.protocol == 'https:' ? 'https:' : 
'http:') + '//cse.google.com/cse.js?cx=' + cx;
+var s = document.getElementsByTagName('script')[0]; 
s.parentNode.insertBefore(gcse, s);
+  })();
+
+
+
+  
+  
+
+   
+  
+  
+
+  
+
+
+
+  
+
+https://www.eventbrite.com/e/hbasecon-asia-2017-tickets-34935546159; 
id="bannerLeft">
+   
 
+
+  
+
+   
 
+
+  
+
+  
+
+  
+
+
+
+
+
+
+  
+
+  
+
+  
+
+
+  
+
+Overview
+This project uses http://hudson-ci.org/;>Hudson.
+
+Access
+The following is a link to the continuous integration system used by the 
project:
+http://hudson.zones.apache.org/hudson/view/HBase/job/HBase-TRUNK/;>http://hudson.zones.apache.org/hudson/view/HBase/job/HBase-TRUNK/
+
+Notifiers
+No notifiers are defined. Please check back at a later date.
+  
+  
+
+
+
+
+
+  
+  Copyright 
20072017
+https://www.apache.org/;>The Apache Software 
Foundation.
+All rights reserved.  
+
+  Last Published: 
2017-08-23
+
+
+
+
+  http://maven.apache.org/; title="Built by 
Maven" class="poweredBy">
+
+  
+  
+
+
+
+
+



  1   2   >